def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
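
# --- Usage sketch (added by the editor, not part of the original file) ---
# A minimal, hedged example of designing a filter and pushing samples through
# it. It assumes `IIRFilter` exposes a per-sample `process(sample)` method,
# as in the accompanying `audio_filters.iir_filter` module; adjust if your
# version differs.
#
#     from math import sin, tau
#
#     samplerate = 48_000
#     filt = make_lowpass(frequency=1_000, samplerate=samplerate)
#     # feed a 5 kHz sine through the 1 kHz low-pass; it should be attenuated
#     filtered = [filt.process(sin(tau * 5_000 * n / samplerate)) for n in range(480)]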
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Nezha model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
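
# --- Usage sketch (added by the editor, not part of the original file) ---
# A hedged example of instantiating the config; it assumes this module lives
# inside the `transformers` package, where the class is exposed as
# `NezhaConfig`:
#
#     config = NezhaConfig(hidden_size=512, num_hidden_layers=6)
#     assert config.model_type == "nezha"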
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words):
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word):
        """Return True if the exact word is stored in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word):
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node, word) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Dict ) -> int:
__a = len(lowerCAmelCase__ )
__a = sum(lowerCAmelCase__ )
__a = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__a = True
for i in range(1 , s + 1 ):
__a = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__a = dp[i][j - 1]
if arr[i - 1] <= j:
__a = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__a = s - 2 * j
break
return diff
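
# --- Usage sketch (added by the editor, not part of the original file) ---
# A minimal demo with illustrative values: [1, 6, 11, 5] splits into
# {1, 5, 6} and {11}, with sums 12 and 11, so the minimum difference is 1.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1
    print(find_min([1, 6, 11, 5]))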
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
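
# --- Usage sketch (added by the editor, not part of the original file) ---
# A hedged example of pairing the image processor and tokenizer. The
# checkpoint name and file path are illustrative, and running it needs
# `Pillow` (plus an OCR backend when `apply_ocr=True`):
#
#     from transformers import LayoutLMv3ImageProcessor, LayoutLMv3TokenizerFast
#     from PIL import Image
#
#     processor = LayoutLMv3Processor(
#         LayoutLMv3ImageProcessor(apply_ocr=False),
#         LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base"),
#     )
#     image = Image.open("document.png").convert("RGB")  # placeholder path
#     words = ["hello", "world"]
#     boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]
#     encoding = processor(image, words, boxes=boxes, return_tensors="pt")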
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
__SCREAMING_SNAKE_CASE : Dict = list[list[float | int]]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Matrix:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = [[0 for _ in range(size + 1 )] for _ in range(_SCREAMING_SNAKE_CASE )]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
for row in range(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = matrix[row][col]
snake_case_ = vector[row][0]
snake_case_ = 0
snake_case_ = 0
while row < size and col < size:
# pivoting
snake_case_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
snake_case_ , snake_case_ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _SCREAMING_SNAKE_CASE ):
snake_case_ = augmented[rowa][col] / augmented[row][col]
snake_case_ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _SCREAMING_SNAKE_CASE ):
for row in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = augmented[row][col] / augmented[col][col]
for cola in range(_SCREAMING_SNAKE_CASE , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_SCREAMING_SNAKE_CASE )
]
def _a ( _SCREAMING_SNAKE_CASE ) -> Callable[[int], int]:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
snake_case_ = [[0] for _ in range(_SCREAMING_SNAKE_CASE )]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
for x_val, y_val in enumerate(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = (x_val + 1) ** (size - col - 1)
snake_case_ = y_val
snake_case_ = solve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def interpolated_func(_SCREAMING_SNAKE_CASE ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_SCREAMING_SNAKE_CASE ) )
return interpolated_func
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _a ( _SCREAMING_SNAKE_CASE = question_function , _SCREAMING_SNAKE_CASE = 10 ) -> int:
snake_case_ = [func(_SCREAMING_SNAKE_CASE ) for x_val in range(1 , order + 1 )]
snake_case_ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
snake_case_ = 0
snake_case_ = 42
snake_case_ = 42
for poly in polynomials:
snake_case_ = 1
while func(_SCREAMING_SNAKE_CASE ) == poly(_SCREAMING_SNAKE_CASE ):
x_val += 1
ret += poly(_SCREAMING_SNAKE_CASE )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]:
snake_case_ = bnb_quantization_config.load_in_abit
snake_case_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
snake_case_ = []
# custom device map
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
snake_case_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
snake_case_ = []
snake_case_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
# compatibility with peft
snake_case_ = load_in_abit
snake_case_ = load_in_abit
snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
# convert param to the right dtype
snake_case_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
param.to(_SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
snake_case_ = replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
snake_case_ = get_quantized_model_device_map(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
snake_case_ = True
snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if device_map is None:
if torch.cuda.is_available():
snake_case_ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
snake_case_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
snake_case_ = {}
snake_case_ = special_dtypes
snake_case_ = no_split_module_classes
snake_case_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
snake_case_ = get_balanced_memory(
_SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ = max_memory
snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
snake_case_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if modules_to_not_convert is None:
snake_case_ = []
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
snake_case_ = False
for name, module in model.named_children():
if current_key_name is None:
snake_case_ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE )
snake_case_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
snake_case_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
snake_case_ = module.weight.data
if module.bias is not None:
snake_case_ = module.bias.data
bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = True
if len(list(module.children() ) ) > 0:
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
# Create a copy of the model
with init_empty_weights():
snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] )
snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
snake_case_ = False
if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ):
snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case_ = list(model.named_children() )
snake_case_ = [list_modules[-1][0]]
# add last module together with tied weights
snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
snake_case_ = [""".weight""", """.bias"""]
snake_case_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for m in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return next(parameter.parameters() ).device
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
snake_case_ = param_name
snake_case_ = model
if "." in tensor_name:
snake_case_ = tensor_name.split(""".""" )
for split in splits[:-1]:
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
snake_case_ = new_module
snake_case_ = splits[-1]
# offload weights
snake_case_ = False
offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
else:
offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
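
# --- Usage sketch (added by the editor, not part of the original file) ---
# A hedged end-to-end sketch of 8-bit quantization with this module. The
# model class and checkpoint path are placeholders:
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     with init_empty_weights():
#         empty_model = MyModel(config)  # hypothetical model class
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location="path/to/checkpoint",  # placeholder
#         device_map="auto",
#     )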
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
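
# --- Usage sketch (added by the editor, not part of the original file) ---
# A minimal concrete command showing the expected subclass shape. The command
# name and behaviour are illustrative only; in practice `register_subcommand`
# receives the subparsers action returned by `ArgumentParser.add_subparsers()`.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message).run())

    def __init__(self, message: str):
        self._message = message

    def run(self):
        print(self._message)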
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""

    # Initialize the accelerator.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        data_dir = data_dir_format(iteration)
        assert os.path.exists(data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
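
# --- Usage sketch (added by the editor, not part of the original file) ---
# A hedged example of kicking off self-training. All paths are placeholders,
# and `finetune` must be importable from the accompanying `finetuning` module.
# `eval_file`, `evaluation_strategy`, and `max_selftrain_iterations` are
# forwarded through **kwargs onto the argument dataclasses above:
#
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="data/train.csv",       # placeholder path
#         infer_file="data/unlabeled.csv",   # placeholder path
#         output_dir="output",
#         eval_file="data/eval.csv",         # placeholder path
#         evaluation_strategy="epoch",
#         max_selftrain_iterations=3,
#     )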
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import math
import unittest


def is_prime(number: int) -> bool:
    """Check whether a number is prime, using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 296 | 0 |
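# Illustrative aside: the row above implements trial division with the
# 6k +/- 1 optimization. The same idea as a minimal, readable sketch (the
# function name is chosen here for clarity, not taken from the dataset):
import math

def is_prime_clean(number: int) -> bool:
    if not isinstance(number, int) or number < 0:
        raise ValueError("'number' must be a non-negative int")
    if number in (2, 3):
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # every prime > 3 has the form 6k +/- 1, so only those candidates are tested
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert is_prime_clean(29) and not is_prime_clean(35)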
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__A : Tuple = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "align_text_model"
def __init__( self : Dict , UpperCAmelCase_ : int=30522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Any=3072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Optional[Any]="absolute" , UpperCAmelCase_ : Dict=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Tuple = use_cache
lowerCAmelCase : int = pad_token_id
@classmethod
def lowercase__ ( cls : str , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Optional[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
        config_dict , kwargs = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "align_vision_model"
def __init__( self : Optional[int] , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 600 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 3.1 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase_ : List[int] = [] , UpperCAmelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase_ : float = 0.25 , UpperCAmelCase_ : str = "swish" , UpperCAmelCase_ : int = 2560 , UpperCAmelCase_ : str = "mean" , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 0.0_01 , UpperCAmelCase_ : float = 0.99 , UpperCAmelCase_ : float = 0.2 , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = num_channels
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Any = width_coefficient
lowerCAmelCase : Optional[Any] = depth_coefficient
lowerCAmelCase : List[str] = depth_divisor
lowerCAmelCase : Tuple = kernel_sizes
lowerCAmelCase : Any = in_channels
lowerCAmelCase : List[Any] = out_channels
lowerCAmelCase : Union[str, Any] = depthwise_padding
lowerCAmelCase : List[Any] = strides
lowerCAmelCase : Tuple = num_block_repeats
lowerCAmelCase : Optional[Any] = expand_ratios
lowerCAmelCase : int = squeeze_expansion_ratio
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = hidden_dim
lowerCAmelCase : Tuple = pooling_type
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = batch_norm_eps
lowerCAmelCase : List[Any] = batch_norm_momentum
lowerCAmelCase : int = drop_connect_rate
lowerCAmelCase : Union[str, Any] = sum(UpperCAmelCase_ ) * 4
@classmethod
def lowercase__ ( cls : int , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Any ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
        config_dict , kwargs = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "align"
lowerCAmelCase_ : str = True
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=640 , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : Optional[int]=0.02 , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_ )
if text_config is None:
lowerCAmelCase : Tuple = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
lowerCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
lowerCAmelCase : int = AlignTextConfig(**UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = AlignVisionConfig(**UpperCAmelCase_ )
lowerCAmelCase : Tuple = projection_dim
lowerCAmelCase : Optional[Any] = temperature_init_value
lowerCAmelCase : int = initializer_range
@classmethod
def lowercase__ ( cls : List[Any] , UpperCAmelCase_ : AlignTextConfig , UpperCAmelCase_ : AlignVisionConfig , **UpperCAmelCase_ : Dict ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Tuple = self.text_config.to_dict()
lowerCAmelCase : Union[str, Any] = self.vision_config.to_dict()
lowerCAmelCase : Any = self.__class__.model_type
return output
| 370 |
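# Illustrative aside: the row above mirrors the composite-config pattern in
# transformers (a text config plus a vision config nested inside a wrapper).
# A minimal usage sketch, assuming the un-obfuscated transformers classes
# AlignConfig / AlignTextConfig / AlignVisionConfig:
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig(hidden_size=768, num_hidden_layers=12)
vision_config = AlignVisionConfig(image_size=600)
config = AlignConfig.from_text_vision_configs(text_config, vision_config)
assert config.text_config.hidden_size == 768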
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323 | 0 |
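# Illustrative aside: outside the test harness, the processor exercised above
# is used like this (a minimal sketch; downloading the public checkpoint
# openai/clip-vit-base-patch32 requires network access):
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]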
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def lowercase_ ( self :int ) -> int:
'''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
class UpperCamelCase__ :
    def __init__(self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
def lowercase_ ( self :int ) -> List[str]:
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def lowercase_ ( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def lowercase_ ( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCAmelCase__ : List[Any] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Dict = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : int = False
def lowercase_ ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
def lowercase_ ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def lowercase_ ( self :List[str] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def lowercase_ ( self :int ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def lowercase_ ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase_ ( self :Any ) -> Any:
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self :List[Any] ) -> List[str]:
'''simple docstring'''
pass
    def lowercase_ ( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase_ ( self :Dict ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase_ ( self :Dict ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def lowercase_ ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
                model_class in get_values(MODEL_MAPPING )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
def lowercase_ ( self :Any ) -> int:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
        config.use_cache = False
        config.return_dict = True
for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
def lowercase_ ( self :int ) -> str:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def lowercase_ ( self :Any ) -> Tuple:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def snake_case ( )-> Any:
"""simple docstring"""
__A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase):
@cached_property
def lowercase_ ( self :Any ) -> Dict:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
# verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 161 |
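# Illustrative aside: both shape checks in the row above recompute the output
# size of the four-layer convolutional stem with the standard convolution
# arithmetic out = floor((in + 2*padding - kernel) / stride) + 1. A standalone
# sketch with the tester's values (image_size=64, kernel=3, stride=2, padding=1):
from math import floor

def conv_out_size(size: int, kernel: int = 3, stride: int = 2, padding: int = 1) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64
for _ in range(4):  # the stem halves the resolution four times: 64 -> 32 -> 16 -> 8 -> 4
    size = conv_out_size(size)
assert size == 4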
'''simple docstring'''
from __future__ import annotations
def snake_case ( UpperCAmelCase )-> list[int]:
"""simple docstring"""
__A = 2
__A = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(UpperCAmelCase )
if n > 1:
factors.append(UpperCAmelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 | 1 |
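# Illustrative aside: the factorization above is plain trial division -- each
# divisor is stripped out completely before moving on, so the returned list is
# sorted and its product equals n. Readable sketch (name chosen for clarity):
def prime_factors(n: int) -> list:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]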
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : Union[str, Any] ):
_snake_case = tempfile.mkdtemp()
# fmt: off
_snake_case = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
_snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
_snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
_snake_case = {'''unk_token''': '''<unk>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
_snake_case = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_snake_case = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Optional[Any] , **_lowerCamelCase : List[str] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def lowercase ( self : Any , **_lowerCamelCase : List[str] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def lowercase ( self : str , **_lowerCamelCase : Any ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowercase ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Dict ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : List[Any] ):
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = self.get_image_processor()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_snake_case = self.get_image_processor(do_normalize=_lowerCamelCase )
_snake_case = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def lowercase ( self : Dict ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = self.prepare_image_inputs()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' )
_snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Optional[Any] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = processor(text=_lowerCamelCase , return_tensors='''np''' )
_snake_case = tokenizer(_lowerCamelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowercase ( self : Optional[Any] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = self.prepare_image_inputs()
_snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : Optional[int] ):
_snake_case = '''google/owlvit-base-patch32'''
_snake_case = OwlViTProcessor.from_pretrained(_lowerCamelCase )
_snake_case = ['''cat''', '''nasa badge''']
_snake_case = processor(text=_lowerCamelCase )
_snake_case = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : Optional[int] ):
_snake_case = '''google/owlvit-base-patch32'''
_snake_case = OwlViTProcessor.from_pretrained(_lowerCamelCase )
_snake_case = [['''cat''', '''nasa badge'''], ['''person''']]
_snake_case = processor(text=_lowerCamelCase )
_snake_case = 16
_snake_case = len(_lowerCamelCase )
_snake_case = max([len(_lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : Union[str, Any] ):
_snake_case = '''google/owlvit-base-patch32'''
_snake_case = OwlViTProcessor.from_pretrained(_lowerCamelCase )
_snake_case = ['''cat''', '''nasa badge''']
_snake_case = processor(text=_lowerCamelCase )
_snake_case = 16
_snake_case = inputs['''input_ids''']
_snake_case = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowercase ( self : List[str] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = self.prepare_image_inputs()
_snake_case = self.prepare_image_inputs()
_snake_case = processor(images=_lowerCamelCase , query_images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : str ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case = processor.batch_decode(_lowerCamelCase )
_snake_case = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 40 |
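# Illustrative aside: the nested-query test above relies on OwlViT padding each
# image's query list up to the longest one before flattening, so input_ids ends
# up with shape (batch_size * num_max_text_queries, seq_length). The reshape in
# miniature (pure Python, independent of the real tokenizer):
queries = [["cat", "nasa badge"], ["person"]]
batch_size = len(queries)
num_max_text_queries = max(len(q) for q in queries)
padded = [q + [""] * (num_max_text_queries - len(q)) for q in queries]
flat = [text for image_queries in padded for text in image_queries]
assert len(flat) == batch_size * num_max_text_queries  # 4 rows, each tokenized to length 16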
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : int ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
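# Illustrative aside: the one-liner above tests parity with a bit mask. Since
# `&` binds tighter than `==` in Python, `number & 1 == 0` parses as
# `(number & 1) == 0`, i.e. "is the lowest bit clear", which matches the
# modulo check for every int (including negatives, thanks to two's complement):
assert (10 & 1) == 0 and (7 & 1) == 1
assert all((n & 1 == 0) == (n % 2 == 0) for n in range(-8, 9))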
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class a_ (_a ):
__lowerCAmelCase : List[Any] = """gpt_neox"""
def __init__( self , snake_case_=5_0_4_3_2 , snake_case_=6_1_4_4 , snake_case_=4_4 , snake_case_=6_4 , snake_case_=2_4_5_7_6 , snake_case_="gelu" , snake_case_=0.25 , snake_case_=1_0_0_0_0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=2_0_4_8 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=True , snake_case_=0 , snake_case_=2 , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : str = rotary_pct
_lowerCAmelCase : Union[str, Any] = rotary_emb_base
_lowerCAmelCase : List[str] = attention_dropout
_lowerCAmelCase : int = hidden_dropout
_lowerCAmelCase : List[Any] = classifier_dropout
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : str = tie_word_embeddings
_lowerCAmelCase : Union[str, Any] = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def __UpperCamelCase ( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 309 |
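# Illustrative aside: a minimal sketch of exercising the validation above,
# assuming the un-obfuscated class name GPTNeoXConfig and a transformers
# version recent enough to accept the rope_scaling argument:
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0
# a malformed dict such as {"type": "linear"} (only one field) would raise
# ValueError inside the validation method instead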
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 | 1 |
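# Illustrative aside: the row above follows the transformers lazy-import
# pattern -- declare the import structure up front and resolve submodules only
# on first attribute access. The core idea in miniature (standalone, without
# the transformers helpers; class and variable names here are illustrative):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import only runs once
        return value

lazy = MiniLazyModule("pkg", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is imported lazily here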
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Optional[Any] = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''data2vec-audio'''
def __init__(self : Dict , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : str=3072 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : str=1E-5 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[str]=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase : List[Any]=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase : Any=False , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Any=19 , _UpperCAmelCase : str=5 , _UpperCAmelCase : List[str]=0.05 , _UpperCAmelCase : Any=10 , _UpperCAmelCase : str=2 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : Tuple="sum" , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Any=256 , _UpperCAmelCase : str=(512, 512, 512, 512, 1500) , _UpperCAmelCase : Dict=(5, 3, 3, 1, 1) , _UpperCAmelCase : Optional[Any]=(1, 2, 3, 1, 1) , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = feat_extract_activation
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = conv_pos_kernel_size
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
lowercase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# adapter
lowercase__ = add_adapter
lowercase__ = adapter_kernel_size
lowercase__ = adapter_stride
lowercase__ = num_adapter_layers
lowercase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = xvector_output_dim
@property
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
return math.prod(self.conv_stride )
| 146 |
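# Illustrative aside: the closing property above returns math.prod(self.conv_stride),
# i.e. the total downsampling factor of the convolutional feature extractor.
# With the default strides (5, 2, 2, 2, 2, 2, 2), one output frame covers
# 5 * 2**6 = 320 input samples -- 20 ms of 16 kHz audio:
import math

strides = (5, 2, 2, 2, 2, 2, 2)
assert math.prod(strides) == 320
assert math.prod(strides) / 16_000 == 0.02  # seconds of audio per logit frame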
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 146 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] =get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Dict = BartphoTokenizer
snake_case__ : Optional[Any] = False
snake_case__ : List[str] = True
def A__ ( self ):
"""simple docstring"""
super().setUp()
lowercase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
        tokenizer = BartphoTokenizer(__lowerCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """This is a là test"""
lowercase = """This is a<unk><unk> test"""
return input_text, output_text
def A__ ( self ):
"""simple docstring"""
        tokenizer = BartphoTokenizer(__lowerCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
        text = """This is a là test"""
        bpe_tokens = """▁This ▁is ▁a ▁l à ▁t est""".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 197 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class _A :
def __init__( self ):
"""simple docstring"""
        self.initialized = False
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if not self.initialized:
            self.retriever = RagRetriever(
                __lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
            self.initialized = True
def A__ ( self ):
"""simple docstring"""
self.retriever.index.init_index()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return doc_ids, retrieved_doc_embeds
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(__lowerCAmelCase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
        self.retrieval_workers = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for worker in self.retrieval_workers
] )
def A__ ( self ):
"""simple docstring"""
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(__lowerCAmelCase , __lowerCAmelCase ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
return super(__lowerCAmelCase , cls ).get_tokenizers(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = kwargs.pop("""config""" , __lowerCAmelCase ) or RagConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowercase = RagTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
lowercase = rag_tokenizer.question_encoder
lowercase = rag_tokenizer.generator
if indexed_dataset is not None:
lowercase = """custom"""
lowercase = CustomHFIndex(config.retrieval_vector_size , __lowerCAmelCase )
else:
lowercase = cls._build_index(__lowerCAmelCase )
return cls(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , retrieval_workers=__lowerCAmelCase , index=__lowerCAmelCase , )
| 197 | 1 |
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 366 |
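# Illustrative aside: the one-liner above is a quine. The lambda receives its
# own source as the string s; s % s turns %% into % and substitutes repr(s)
# into the %r slot, so the printed text is exactly the program's source:
s = 'print((lambda quine: quine %% quine)(%r))'
assert s % s == "print((lambda quine: quine % quine)(" + repr(s) + "))"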
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
    'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
    def A__ (self , image , label ):
        '''simple docstring'''
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
            logits = self.model(**lowerCamelCase ).logits
return logits
    def A__ (self , outputs ):
        '''simple docstring'''
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 317 | 0 |
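# Illustrative aside: the decode step above binarizes the logits -- values <= 0
# become background, values > 0 become mask -- then scales to a 0/255 image.
# The thresholding in miniature:
import numpy as np

logits = np.array([[-1.2, 0.3], [2.1, -0.5]], dtype=np.float32)
mask = (logits > 0).astype(np.uint8) * 255
assert mask.tolist() == [[0, 255], [255, 0]]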
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
'''simple docstring'''
    def __init__( self ):
        """simple docstring"""
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_init_end""" )
    def on_train_begin( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_train_begin""" )
    def on_train_end( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_train_end""" )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_epoch_begin""" )
    def on_epoch_end( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_epoch_end""" )
    def on_step_begin( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_step_begin""" )
    def on_step_end( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_step_end""" )
    def on_evaluate( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_evaluate""" )
    def on_predict( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_predict""" )
    def on_save( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_save""" )
    def on_log( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_log""" )
    def on_prediction_step( self , args , state , control , **kwargs ):
        """simple docstring"""
        self.events.append("""on_prediction_step""" )
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = tempfile.mkdtemp()
def _lowerCAmelCase ( self ):
"""simple docstring"""
shutil.rmtree(self.output_dir )
def _lowerCAmelCase ( self , _a=0 , _a=0 , _a=64 , _a=64 , _a=None , _a=False , **_a ):
"""simple docstring"""
lowerCamelCase = RegressionDataset(length=_a )
lowerCamelCase = RegressionDataset(length=_a )
lowerCamelCase = RegressionModelConfig(a=_a , b=_a )
lowerCamelCase = RegressionPreTrainedModel(_a )
lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=_a , report_to=[] , **_a )
return Trainer(
_a , _a , train_dataset=_a , eval_dataset=_a , callbacks=_a , )
def check_callbacks_equality(self, cbs1, cbs2):
    """simple docstring"""
    self.assertEqual(len(cbs1), len(cbs2))
    # Order doesn't matter
    cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
    cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
    for cb1, cb2 in zip(cbs1, cbs2):
        if isinstance(cb1, type) and isinstance(cb2, type):
            self.assertEqual(cb1, cb2)
        elif isinstance(cb1, type) and not isinstance(cb2, type):
            self.assertEqual(cb1, cb2.__class__)
        elif not isinstance(cb1, type) and isinstance(cb2, type):
            self.assertEqual(cb1.__class__, cb2)
        else:
            self.assertEqual(cb1, cb2)
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = ["""on_init_end""", """on_train_begin"""]
lowerCamelCase = 0
lowerCamelCase = len(trainer.get_eval_dataloader() )
lowerCamelCase = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(_a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_trainer()
lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
# Callbacks passed at init are added to the default callbacks
lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowerCamelCase = self.get_trainer(disable_tqdm=_a )
lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_a )
expected_callbacks.remove(_a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
lowerCamelCase = self.get_trainer()
lowerCamelCase = trainer.pop_callback(_a )
self.assertEqual(cb.__class__ , _a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
trainer.add_callback(_a )
expected_callbacks.insert(0 , _a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
# We can also add, pop, or remove by instance
lowerCamelCase = self.get_trainer()
lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_a )
expected_callbacks.remove(_a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
lowerCamelCase = self.get_trainer()
lowerCamelCase = trainer.callback_handler.callbacks[0]
lowerCamelCase = trainer.pop_callback(_a )
self.assertEqual(_a , _a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
trainer.add_callback(_a )
expected_callbacks.insert(0 , _a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=UserWarning )
lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_a , self.get_expected_events(_a ) )
# Independent log/save/eval
lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_a , self.get_expected_events(_a ) )
lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_a , self.get_expected_events(_a ) )
lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_a , self.get_expected_events(_a ) )
lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_a , self.get_expected_events(_a ) )
# A bit of everything
lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_a , self.get_expected_events(_a ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
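As a companion to these tests, a minimal standalone sketch of the callback pattern they exercise, assuming only the public TrainerCallback API; the EventRecorder name is illustrative.

from transformers import TrainerCallback

class EventRecorder(TrainerCallback):
    """Records the name of every event it receives, like the test double above."""
    def __init__(self):
        self.events = []

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

# Usage (sketch): Trainer(model=model, args=training_args, callbacks=[EventRecorder()])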
| 291 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowerCamelCase_ ={
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCamelCase_ ={
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCamelCase_ =tempfile.mkdtemp()
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
with open(self.feature_extraction_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
# load decoder from hub
lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder'''
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(lowerCAmelCase, '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ ='''This is a test string'''
lowerCamelCase_ =processor(text=lowerCAmelCase )
lowerCamelCase_ =tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
    """simple docstring"""
    np.random.seed(seed)
    return np.random.rand(*shape)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 )
lowerCamelCase_ =processor.decode(lowerCAmelCase )
lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase__ ( self, pool_context ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
else:
with get_context(pool_context ).Pool() as pool:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase, decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =15
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =-4.0
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =2.0
lowerCamelCase_ =5.0
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =True
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
decoder.reset_params(
alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
self.assertEqual(lm_model.score_boundary, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =os.listdir(lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase )
lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
@staticmethod
def get_from_offsets(offsets, key):
    """simple docstring"""
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()[0]
lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=True )
lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
lowerCamelCase_ =iter(lowerCAmelCase )
lowerCamelCase_ =next(lowerCAmelCase )
lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy()
lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase_ =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
# output times
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
# fmt: off
lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
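For orientation, a hedged sketch of the decoding path these tests cover, using the public Wav2Vec2ProcessorWithLM API; it assumes pyctcdecode and kenlm are installed and reuses the small test checkpoint referenced above.

import numpy as np
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) dummy logits, as in the tests
output = processor.batch_decode(logits)
print(output.text, output.logit_score, output.lm_score)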
| 75 | 0 |
def A__ ( numerator = 1, digit = 1000 ):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
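A quick worked check of the solver above: among denominators below 10, 1/7 = 0.(142857) has the longest recurring cycle, so the first call is expected to return 7; the default run reproduces the published Project Euler 26 answer.

print(A__(1, 10))  # -> 7
print(A__())       # -> 983 for denominators below 1000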
| 355 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , **_A ) -> Union[str, Any]:
super().__init__(**_A )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(_A )
def __call__( self , _A , _A = None , **_A , ) -> str:
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs.pop('''text_queries''' )
if isinstance(_A , (str, Image.Image) ):
SCREAMING_SNAKE_CASE_ = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = super().__call__(_A , **_A )
return results
def _UpperCamelCase ( self , **_A ) -> str:
SCREAMING_SNAKE_CASE_ = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs['''threshold''']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs['''top_k''']
return {}, {}, postprocess_params
def _UpperCamelCase ( self , _A ) -> Any:
SCREAMING_SNAKE_CASE_ = load_image(inputs['''image'''] )
SCREAMING_SNAKE_CASE_ = inputs['''candidate_labels''']
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = candidate_labels.split(''',''' )
SCREAMING_SNAKE_CASE_ = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )  # dtype garbled in the source; int32 assumed
for i, candidate_label in enumerate(_A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , return_tensors=self.framework )
SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=self.framework )
yield {
"is_last": i == len(_A ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _UpperCamelCase ( self , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = model_inputs.pop('''target_size''' )
SCREAMING_SNAKE_CASE_ = model_inputs.pop('''candidate_label''' )
SCREAMING_SNAKE_CASE_ = model_inputs.pop('''is_last''' )
SCREAMING_SNAKE_CASE_ = self.model(**_A )
SCREAMING_SNAKE_CASE_ = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def _UpperCamelCase ( self , _A , _A=0.1 , _A=None ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE_ = model_output['''candidate_label''']
SCREAMING_SNAKE_CASE_ = BaseModelOutput(_A )
SCREAMING_SNAKE_CASE_ = self.image_processor.post_process_object_detection(
outputs=_A , threshold=_A , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE_ = outputs['''scores'''][index].item()
SCREAMING_SNAKE_CASE_ = self._get_bounding_box(outputs['''boxes'''][index][0] )
SCREAMING_SNAKE_CASE_ = {'''score''': score, '''label''': label, '''box''': box}
results.append(_A )
SCREAMING_SNAKE_CASE_ = sorted(results , key=lambda x: x["score"] , reverse=True )
if top_k:
SCREAMING_SNAKE_CASE_ = results[:top_k]
return results
def _UpperCamelCase ( self , _A ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = box.int().tolist()
SCREAMING_SNAKE_CASE_ = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
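A short usage sketch of this pipeline via the public factory; the checkpoint name, image URL, and threshold are illustrative choices, not requirements.

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 3), pred["box"])  # box: xmin/ymin/xmax/ymax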
| 257 | 0 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
A_ : str = data_utils.TransfoXLTokenizer
A_ : str = data_utils.TransfoXLCorpus
A_ : List[str] = data_utils
A_ : int = data_utils
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , """rb""" ) as fp:
SCREAMING_SNAKE_CASE__ = pickle.load(snake_case__ , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
SCREAMING_SNAKE_CASE__ = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
SCREAMING_SNAKE_CASE__ = os.path.abspath(snake_case__ )
SCREAMING_SNAKE_CASE__ = os.path.abspath(snake_case__ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
SCREAMING_SNAKE_CASE__ = TransfoXLConfig()
else:
SCREAMING_SNAKE_CASE__ = TransfoXLConfig.from_json_file(snake_case__ )
print(f"""Building PyTorch model from configuration: {config}""" )
SCREAMING_SNAKE_CASE__ = TransfoXLLMHeadModel(snake_case__ )
SCREAMING_SNAKE_CASE__ = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case__ , snake_case__ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case__ )}""" )
torch.save(model.state_dict() , snake_case__ )
print(f"""Save configuration file to {os.path.abspath(snake_case__ )}""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
A_ : List[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
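Once converted, the dump folder can be loaded back like any local checkpoint. A sketch, assuming "./transfo-xl-dump" stands in for whatever was passed as --pytorch_dump_folder_path:

import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel

dump_dir = "./transfo-xl-dump"  # hypothetical --pytorch_dump_folder_path
config = TransfoXLConfig.from_json_file(f"{dump_dir}/config.json")
model = TransfoXLLMHeadModel(config)
model.load_state_dict(torch.load(f"{dump_dir}/pytorch_model.bin", map_location="cpu"))
model.eval()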
| 165 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase__ : Optional[str] = field(
default='./' ,metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for training.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.1 ,metadata={'help': 'Value of weight decay.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0 ,metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCamelCase__ : Optional[float] = field(default=2E-4 ,metadata={'help': 'Learning rate for training.'} )
lowerCamelCase__ : Optional[str] = field(default='cosine' ,metadata={'help': 'Learning rate schedule type.'} )
lowerCamelCase__ : Optional[int] = field(
default=7_5_0 ,metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_6 ,metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase__ : Optional[int] = field(default=5_0_0_0_0 ,metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Training seed.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_2_4 ,metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase__ : Optional[float] = field(default=0.2 ,metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase__ : Optional[int] = field(default=2_5_6 ,metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase__ : Optional[int] = field(default=0 ,metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.9_5 ,metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0 ,metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase__ : Optional[int] = field(
default=2_0_0 ,metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase__ : Optional[str] = field(
default='eval_results.json' ,metadata={'help': 'File in which to store the evaluation results.'} )
lowerCamelCase__ : Optional[str] = field(
default='0' ,metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} ,)
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} ,)
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot' ,metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot-clean' ,metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0_0 ,metadata={'help': 'Number of samples to save per JSON output file.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0_0 ,metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0 ,metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.2_5 ,metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1.5 ,metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.7 ,metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.8_5 ,metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='gpt2' ,metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot-train' ,metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[int] = field(default=2_0_0_0_0_0 ,metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase__ : Optional[int] = field(
default=3_2_7_6_8 ,metadata={'help': 'Target vocabulary size of the new tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase__ : Optional[str] = field(
default='tokenized-codeparrot-train' ,metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='gpt2-large' ,metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of the created model.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
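Dataclasses like these are normally consumed with HfArgumentParser. A minimal sketch of that pattern, using an illustrative two-field config rather than one of the classes above:

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser

@dataclass
class TrainCfg:
    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})

(cfg,) = HfArgumentParser(TrainCfg).parse_args_into_dataclasses(args=["--train_batch_size", "8"])
print(cfg.model_ckpt, cfg.train_batch_size)  # codeparrot/codeparrot 8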
| 165 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.999 , SCREAMING_SNAKE_CASE="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__UpperCamelCase :Optional[Any] = []
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Any = i / num_diffusion_timesteps
__UpperCamelCase :Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE ) / alpha_bar_fn(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return torch.tensor(betas , dtype=torch.float32 )
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
a__ : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
a__ : Dict = 2
@register_to_config
def __init__( self , __lowercase = 1_000 , __lowercase = 0.0_00_85 , __lowercase = 0.0_12 , __lowercase = "linear" , __lowercase = None , __lowercase = "epsilon" , __lowercase = "linspace" , __lowercase = 0 , ) -> Optional[int]:
if trained_betas is not None:
__UpperCamelCase :Any = torch.tensor(__lowercase , dtype=torch.float32)
elif beta_schedule == "linear":
__UpperCamelCase :str = torch.linspace(__lowercase , __lowercase , __lowercase , dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCamelCase :List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowercase , dtype=torch.float32) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCamelCase :Optional[int] = betas_for_alpha_bar(__lowercase)
else:
raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")
__UpperCamelCase :Any = 1.0 - self.betas
__UpperCamelCase :Optional[int] = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(__lowercase , __lowercase , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase=None) -> Any:
if schedule_timesteps is None:
__UpperCamelCase :Dict = self.timesteps
__UpperCamelCase :List[str] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
__UpperCamelCase :int = 1 if len(__lowercase) > 1 else 0
else:
__UpperCamelCase :str = timestep.cpu().item() if torch.is_tensor(__lowercase) else timestep
__UpperCamelCase :Union[str, Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase__ ( self) -> Tuple:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase__ ( self , __lowercase , __lowercase , ) -> torch.FloatTensor:
__UpperCamelCase :Dict = self.index_for_timestep(__lowercase)
if self.state_in_first_order:
__UpperCamelCase :Dict = self.sigmas[step_index]
else:
__UpperCamelCase :int = self.sigmas_interpol[step_index]
__UpperCamelCase :Tuple = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , ) -> Tuple:
__UpperCamelCase :Optional[Any] = num_inference_steps
__UpperCamelCase :Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCamelCase :List[Any] = np.linspace(0 , num_train_timesteps - 1 , __lowercase , dtype=__lowercase)[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCamelCase :Dict = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCamelCase :List[str] = (np.arange(0 , __lowercase) * step_ratio).round()[::-1].copy().astype(__lowercase)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCamelCase :Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCamelCase :Dict = (np.arange(__lowercase , 0 , -step_ratio)).round().copy().astype(__lowercase)
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
__UpperCamelCase :List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
__UpperCamelCase :Tuple = torch.from_numpy(np.log(__lowercase)).to(__lowercase)
__UpperCamelCase :Union[str, Any] = np.interp(__lowercase , np.arange(0 , len(__lowercase)) , __lowercase)
__UpperCamelCase :Any = np.concatenate([sigmas, [0.0]]).astype(np.float32)
__UpperCamelCase :Optional[Any] = torch.from_numpy(__lowercase).to(device=__lowercase)
# interpolate sigmas
__UpperCamelCase :Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
__UpperCamelCase :Tuple = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
__UpperCamelCase :str = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(__lowercase).startswith('''mps'''):
# mps does not support float64
__UpperCamelCase :Any = torch.from_numpy(__lowercase).to(__lowercase , dtype=torch.float32)
else:
__UpperCamelCase :List[str] = torch.from_numpy(__lowercase).to(__lowercase)
# interpolate timesteps
__UpperCamelCase :int = self.sigma_to_t(__lowercase).to(__lowercase , dtype=timesteps.dtype)
__UpperCamelCase :Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
__UpperCamelCase :Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps])
__UpperCamelCase :Union[str, Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCamelCase :str = defaultdict(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
# get log sigma
__UpperCamelCase :Optional[Any] = sigma.log()
# get distribution
__UpperCamelCase :Optional[Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__UpperCamelCase :Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
__UpperCamelCase :List[str] = low_idx + 1
__UpperCamelCase :str = self.log_sigmas[low_idx]
__UpperCamelCase :List[str] = self.log_sigmas[high_idx]
# interpolate sigmas
__UpperCamelCase :Tuple = (low - log_sigma) / (low - high)
__UpperCamelCase :List[str] = w.clamp(0 , 1)
# transform interpolation to time range
__UpperCamelCase :Tuple = (1 - w) * low_idx + w * high_idx
__UpperCamelCase :Optional[int] = t.view(sigma.shape)
return t
@property
def UpperCamelCase__ ( self) -> Union[str, Any]:
return self.sample is None
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = True , ) -> Union[SchedulerOutput, Tuple]:
__UpperCamelCase :Union[str, Any] = self.index_for_timestep(__lowercase)
# advance index counter by 1
__UpperCamelCase :int = timestep.cpu().item() if torch.is_tensor(__lowercase) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCamelCase :List[Any] = self.sigmas[step_index]
__UpperCamelCase :List[str] = self.sigmas_interpol[step_index + 1]
__UpperCamelCase :Optional[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__UpperCamelCase :List[Any] = self.sigmas[step_index - 1]
__UpperCamelCase :List[Any] = self.sigmas_interpol[step_index]
__UpperCamelCase :str = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCamelCase :str = 0
__UpperCamelCase :Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCamelCase :Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCamelCase :List[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCamelCase :Dict = sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCamelCase :List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''')
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCamelCase :List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCamelCase :Union[str, Any] = sigma_interpol - sigma_hat
# store for 2nd order step
__UpperCamelCase :Any = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__UpperCamelCase :Union[str, Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__UpperCamelCase :Tuple = sigma_next - sigma_hat
__UpperCamelCase :Tuple = self.sample
__UpperCamelCase :int = None
__UpperCamelCase :Optional[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCamelCase :Any = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(__lowercase):
# mps does not support float64
__UpperCamelCase :Dict = self.timesteps.to(original_samples.device , dtype=torch.float32)
__UpperCamelCase :Optional[Any] = timesteps.to(original_samples.device , dtype=torch.float32)
else:
__UpperCamelCase :Any = self.timesteps.to(original_samples.device)
__UpperCamelCase :List[str] = timesteps.to(original_samples.device)
__UpperCamelCase :List[Any] = [self.index_for_timestep(__lowercase , __lowercase) for t in timesteps]
__UpperCamelCase :Any = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
__UpperCamelCase :Any = sigma.unsqueeze(-1)
__UpperCamelCase :Optional[int] = original_samples + noise * sigma
return noisy_samples
def __len__( self) -> List[Any]:
return self.config.num_train_timesteps
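This class mirrors the public KDPM2DiscreteScheduler in diffusers. A minimal sketch of its scale-then-step denoising loop, with a zero tensor standing in for a real UNet prediction:

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a model(model_input, t) call
    sample = scheduler.step(noise_pred, t, sample).prev_sample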
| 355 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :List[str] = emb.weight.shape
__UpperCamelCase :str = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = emb.weight.data
return lin_layer
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Dict = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
__UpperCamelCase :Tuple = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__UpperCamelCase :Dict = mam_aaa['''model''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__UpperCamelCase :Dict = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
__UpperCamelCase :Tuple = state_dict['''decoder.embed_tokens.weight''']
__UpperCamelCase :int = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE )
model.model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase = parser.parse_args()
__lowercase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
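The make_linear_from_emb helper above is the standard embedding/LM-head weight-tying trick; here is a self-contained sketch of the same idea:

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # the output projection now shares the embedding matrix

tokens = torch.tensor([[1, 2, 3]])
print(lm_head(emb(tokens)).shape)  # torch.Size([1, 3, 100])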
| 105 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=SCREAMING_SNAKE_CASE__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=SCREAMING_SNAKE_CASE__ , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=SCREAMING_SNAKE_CASE__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE__ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=SCREAMING_SNAKE_CASE__ , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__lowercase = parser.parse_args()
return args
def _A ( A__ ):
"""simple docstring"""
def fn(A__ ):
return tokenizer(examples['''text'''] )
return fn
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__lowercase = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__lowercase = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
__lowercase = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
__lowercase = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
def _A ( A__ ):
"""simple docstring"""
__lowercase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__lowercase = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
__lowercase = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F"Limiting the dataset to {args.limit} entries." )
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowercase = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
__lowercase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__lowercase = tokenize_function(SCREAMING_SNAKE_CASE__ )
__lowercase = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A__ ):
# Concatenate all texts.
__lowercase = {k: sum(examples[k] , [] ) for k in examples.keys()}
__lowercase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__lowercase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__lowercase = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__lowercase = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
__lowercase = 0
__lowercase = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
__lowercase = grouped_dataset[shard : shard + args.shard_size]
__lowercase = len(dataset_snapshot['''input_ids'''] )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE__ , F"dataset-{shard_count}-{records_containing}.tfrecord" )
__lowercase = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__lowercase = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print('''Wrote file {} containing {} records'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(F"Total {args.split} records: {total_records}" , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
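# A minimal sketch (assumed, not part of the script above) of reading a written
# shard back: each record is a serialized tf.train.Example holding int64
# `input_ids` and `attention_mask` features of length max_length. The shard
# filename is a placeholder following the script's naming pattern.
import tensorflow as tf

max_length = 512  # must match the --max_length used when writing

feature_spec = {
    "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
}

def decode_fn(serialized):
    return tf.io.parse_single_example(serialized, feature_spec)

ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
ds = ds.map(decode_fn)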
| 104 |
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:  # empty and single-character strings are palindromes
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
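# Small worked example (added for illustration) of the two-pointer check:
# "rotor" compares (r, r), then (o, o), and the pointers meet at 't' -> True;
# "String" fails on the very first comparison ('S' != 'g').
assert is_palindrome("rotor") is True
assert is_palindrome("String") is False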
| 346 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case_( unittest.TestCase ):
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Optional[Any]=1_8 , UpperCamelCase_ : Union[str, Any]=3_0 , UpperCamelCase_ : Tuple=4_0_0 , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : int=[0.5, 0.5, 0.5] , UpperCamelCase_ : Dict=[0.5, 0.5, 0.5] , UpperCamelCase_ : int=False , ):
lowerCAmelCase : List[str] = size if size is not None else {'''height''': 2_0, '''width''': 2_0}
lowerCAmelCase : str = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowerCAmelCase : Any = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Union[str, Any] = max_resolution
lowerCAmelCase : Dict = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : Any = do_center_crop
lowerCAmelCase : Tuple = crop_size
lowerCAmelCase : Union[str, Any] = do_normalize
lowerCAmelCase : Optional[int] = image_mean
lowerCAmelCase : Union[str, Any] = image_std
lowerCAmelCase : int = do_reduce_labels
def lowerCamelCase__ ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _snake_case ( ):
lowerCAmelCase : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase : List[str] = Image.open(dataset[0]['''file'''] )
lowerCAmelCase : List[str] = Image.open(dataset[1]['''file'''] )
return image, map
def _snake_case ( ):
lowerCAmelCase : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase : List[str] = Image.open(ds[0]['''file'''] )
lowerCAmelCase : Union[str, Any] = Image.open(ds[1]['''file'''] )
lowerCAmelCase : Optional[Any] = Image.open(ds[2]['''file'''] )
lowerCAmelCase : Any = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = BeitImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 2_0, '''width''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
self.assertEqual(image_processor.do_reduce_labels , UpperCamelCase_ )
lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
self.assertEqual(image_processor.do_reduce_labels , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
pass
def lowerCamelCase__ ( self : int ):
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase__ ( self : Tuple ):
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase : Any = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase__ ( self : Optional[int] ):
# Initialize image_processing
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = []
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCAmelCase : Any = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched
lowerCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
lowerCAmelCase, lowerCAmelCase : Any = prepare_semantic_single_inputs()
lowerCAmelCase : str = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched input (PIL images)
lowerCAmelCase, lowerCAmelCase : List[Any] = prepare_semantic_batch_inputs()
lowerCAmelCase : Tuple = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
def lowerCamelCase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = prepare_semantic_single_inputs()
lowerCAmelCase : List[Any] = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_5_0 )
lowerCAmelCase : int = True
lowerCAmelCase : str = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
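# Hedged usage sketch for the processor exercised above: BeitImageProcessor
# resizes, crops and normalizes an image and, when also given a segmentation
# map, returns integer `labels` alongside `pixel_values`. Inputs are synthetic.
import numpy as np
from PIL import Image
from transformers import BeitImageProcessor

processor = BeitImageProcessor(size={"height": 20, "width": 20}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 255, (30, 30, 3), dtype=np.uint8))
seg_map = Image.fromarray(np.zeros((30, 30), dtype=np.uint8))

encoding = processor(image, seg_map, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 18, 18])
print(encoding["labels"].shape)        # torch.Size([1, 18, 18])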
| 314 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
class snake_case_( a__ ):
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config )
lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ):
lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0]
lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ )
lowerCAmelCase : Any = nsfw_detected.flatten()
lowerCAmelCase : Dict = nsfw_detected > p_threshold
lowerCAmelCase : int = nsfw_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ):
if nsfw_detected_:
lowerCAmelCase : List[Any] = np.zeros(images[idx].shape )
lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = watermark_detected.flatten()
lowerCAmelCase : Optional[int] = watermark_detected > w_threshold
lowerCAmelCase : Union[str, Any] = watermark_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase_ ):
if watermark_detected_:
lowerCAmelCase : List[str] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
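# Toy sketch (made-up values) of the thresholding pattern used by the checker
# above: per-image head scores are compared against a threshold and any
# flagged image is replaced by a black (all-zero) image of the same shape.
import numpy as np
import torch

images = [np.ones((64, 64, 3)), np.ones((64, 64, 3))]
scores = torch.tensor([0.2, 0.9])        # pretend per-image head outputs
flagged = (scores > 0.5).tolist()        # -> [False, True]
for idx, is_flagged in enumerate(flagged):
    if is_flagged:
        images[idx] = np.zeros(images[idx].shape)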
| 314 | 1 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Exact Gaussian Error Linear Unit: gelu(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """
    Smoother tanh approximation of gelu, as used in the original GPT/BERT code.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)  # ~ sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    gelu with outputs clipped to the range [-10, 10].
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: split the input in two along `axis` and gate one half
    with the sigmoid of the other.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}')
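# Quick numerical sanity check (added for illustration): the exact erf-based
# gelu and its tanh approximation agree closely on moderate inputs.
x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
print(_gelu(x).numpy())
print(_gelu_new(x).numpy())
print(tf.reduce_max(tf.abs(_gelu(x) - _gelu_new(x))).numpy())  # tiny difference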
| 267 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
UpperCAmelCase : int = random.Random()
def a__ ( a__ , a__=1.0 , a__=None , a__=None ):
"""simple docstring"""
if rng is None:
__SCREAMING_SNAKE_CASE = global_rng
__SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=7 , __SCREAMING_SNAKE_CASE : List[str]=400 , __SCREAMING_SNAKE_CASE : Any=2_000 , __SCREAMING_SNAKE_CASE : List[str]=10 , __SCREAMING_SNAKE_CASE : Optional[int]=160 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : Dict=4_000 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : List[Any]=True , ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = min_seq_length
__SCREAMING_SNAKE_CASE = max_seq_length
__SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = return_attention_mask
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = chunk_length
__SCREAMING_SNAKE_CASE = hop_length
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Union[str, Any]:
"""simple docstring"""
def _flatten(__SCREAMING_SNAKE_CASE : Dict ):
return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) )
if equal_length:
__SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
__SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
__SCREAMING_SNAKE_CASE = feat_extract_first.mel_filters
__SCREAMING_SNAKE_CASE = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , """feat_extract.json""" )
feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
__SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
__SCREAMING_SNAKE_CASE = feat_extract_first.mel_filters
__SCREAMING_SNAKE_CASE = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__SCREAMING_SNAKE_CASE = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test batched
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test truncation required
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
__SCREAMING_SNAKE_CASE = [x[: feature_extractor.n_samples] for x in speech_inputs]
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs_truncated]
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = np.random.rand(100 , 32 ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE = WhisperFeatureExtractor()
__SCREAMING_SNAKE_CASE = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = self._load_datasamples(1 )[0]
__SCREAMING_SNAKE_CASE = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
__SCREAMING_SNAKE_CASE = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__SCREAMING_SNAKE_CASE )[0]
self.assertTrue(np.all(np.mean(__SCREAMING_SNAKE_CASE ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__SCREAMING_SNAKE_CASE ) - 1 ) < 1E-3 ) )
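# Hedged usage sketch for the feature extractor tested above: raw mono audio
# (a synthetic sine here, not real speech) is padded to 30 s and converted to
# an 80-bin log-mel spectrogram of shape (1, 80, 3000).
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.sin(np.linspace(0, 2 * np.pi * 440, 16_000)).astype(np.float32)  # ~1 s at 16 kHz
inputs = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
print(inputs.input_features.shape)  # (1, 80, 3000)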
| 267 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
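# Worked examples (added for illustration): on [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# the running sum resets whenever extending the window is worse than starting
# over, and the best window is [4, -1, 2, 1] with sum 6.
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-1, -2, -3]) == -1                             # best single element
assert max_subarray_sum([-1, -2, -3], allow_empty_subarrays=True) == 0  # empty window allowed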
| 364 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
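# Hedged usage sketch for the processor above: with `apply_ocr=False` on the
# image processor, words and 0-1000-normalized bounding boxes are supplied
# manually. The checkpoint id is the standard Hub one; inputs are synthetic.
import numpy as np
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
image = Image.fromarray(np.full((224, 224, 3), 255, dtype=np.uint8))
words = ["hello", "world"]
boxes = [[10, 10, 60, 30], [70, 10, 130, 30]]

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values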
| 11 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A : List[str] = None
__A : List[str] = logging.get_logger(__name__)
__A : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A : str = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A : Optional[Any] = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
__A : Tuple = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ : List[str] = MBartTokenizer
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Optional[int] = []
def __init__( self : Optional[int] , A : Any=None , A : Union[str, Any]=None , A : Optional[int]="<s>" , A : Dict="</s>" , A : Union[str, Any]="</s>" , A : Tuple="<s>" , A : List[Any]="<unk>" , A : Optional[Any]="<pad>" , A : List[Any]="<mask>" , A : str=None , A : Tuple=None , A : Optional[Any]=None , **A : Union[str, Any] , ) -> Optional[int]:
# Mask token behaves like a normal word, i.e. it includes the space before it
lowercase_ : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
lowercase_ : Union[str, Any] = vocab_file
lowercase_ : int = False if not self.vocab_file else True
lowercase_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowercase_ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase_ : Optional[int] = src_lang if src_lang is not None else "en_XX"
lowercase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
lowercase_ : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def A ( self : Tuple , A : Union[str, Any] ) -> None:
lowercase_ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A ( self : List[Any] , A : str , A : str = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A ( self : Dict , A : List[Any] , A : Union[str, Any] = None ) -> List[int]:
lowercase_ : List[str] = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : Tuple , A : Dict , A : Tuple , A : Optional[Any] , A : Any , **A : str ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : str = src_lang
lowercase_ : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
lowercase_ : Tuple = self.convert_tokens_to_ids(__lowerCamelCase )
lowercase_ : Dict = tgt_lang_id
return inputs
def A ( self : Optional[int] , A : Union[str, Any] , A : Dict = "en_XX" , A : Union[str, Any] = None , A : Dict = "ro_RO" , **A : Dict , ) -> BatchEncoding:
lowercase_ : Any = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def A ( self : List[Any] ) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang )
def A ( self : Any ) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A ( self : List[Any] , A : List[Any] ) -> None:
lowercase_ : int = self.convert_tokens_to_ids(__lowerCamelCase )
lowercase_ : int = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
lowercase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : str = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A ( self : Union[str, Any] , A : List[str] ) -> None:
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase )
lowercase_ : List[Any] = []
lowercase_ : str = [self.eos_token_id, self.cur_lang_code]
lowercase_ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A ( self : Optional[Any] , A : Optional[int] , A : Tuple = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowercase_ : int = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
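# Hedged usage sketch for the fast MBart tokenizer above: the src/tgt language
# codes control the special tokens placed around each sequence. The checkpoint
# id is the standard en-ro one; the sentence is arbitrary.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer("UN Chief Says There Is No Plan to Stop War in Syria", return_tensors="pt")
# Source sequences end with </s> followed by the source language code:
print(tokenizer.convert_ids_to_tokens(inputs.input_ids[0])[-2:])  # ['</s>', 'en_XX']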
| 33 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
    def _lowerCamelCase ( self) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
    def _lowerCamelCase ( self) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
    def _lowerCamelCase ( self) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this indexing zeroes out every NaN entry in place
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs)
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs).to_tuple()
                def recursive_check(tuple_object , dict_object):
                    if isinstance(tuple_object , (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif isinstance(tuple_object , Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values()):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object) , set_nan_tensor_to_zero(dict_object) , atol=1e-5) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                F" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                F" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ) , )
                recursive_check(tuple_output , dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , BackboneTesterMixin):
    '''simple docstring'''
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def _lowerCamelCase ( self) -> Optional[Any]:
        self.model_tester = MaskFormerSwinModelTester(self)
    def _lowerCamelCase ( self) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states) , len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , h_seq_len , h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
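# Editor's sketch (an addition, not original dataset content): the shape
# contract the backbone test above verifies, in miniature. The top-level import
# path and the tiny config values are assumptions for illustration only, since
# MaskFormerSwin ships no pretrained checkpoint per the skip markers above.
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(
    image_size=64, embed_dim=16, depths=[1, 1], num_heads=[2, 2],
    out_features=["stage1", "stage2"],
)
backbone = MaskFormerSwinBackbone(config)
backbone.eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 64, 64))
# One feature map per requested stage, each with that stage's channel count.
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
    assert feature_map.shape[:2] == (1, n_channels)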
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    model_type = 'encoder-decoder'
    is_composition = True
    def __init__( self , **kwargs ) ->Optional[Any]:
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def __lowerCamelCase ( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ) ->List[str]:
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def __lowerCamelCase ( self ) ->List[Any]:
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 358 |
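# Editor's sketch (an addition, not original dataset content): how a composite
# encoder-decoder config like the one above is typically built and serialized.
# `EncoderDecoderConfig` and `BertConfig` are the canonical transformers names;
# the obfuscated snippet above does not export them itself.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
decoder_config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
# The classmethod flips the decoder into cross-attention mode before bundling.
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
# to_dict() re-serializes both sub-configs under their own keys.
serialized = config.to_dict()
assert serialized["model_type"] == "encoder-decoder"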
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Any = ["audio_values", "audio_mask"]
    def __init__( self , spectrogram_length=2_0_4_8 , num_channels=1 , patch_size=[1_6, 1_6] , feature_size=1_2_8 , sampling_rate=4_4_1_0_0 , hop_length_to_sampling_rate=8_6 , n_fft=2_0_4_8 , padding_value=0.0 , **kwargs , ) ->List[Any]:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ).T
    def _np_extract_fbank_features( self , waveform : np.array ) ->np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = True , sampling_rate : Optional[int] = None , resample : bool = False , mask_audio : bool = False , **kwargs , ) ->BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    F" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 265 | 0 |
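# Editor's note (an addition): a quick check of the patching arithmetic used by
# the feature extractor above, with values taken from its constructor defaults.
# Only the spectrogram length is a made-up example value.
from math import ceil

patch_size = [16, 16]       # default patch_size
feature_size = 128          # default number of mel bins
freq_len = feature_size // patch_size[1]      # 128 // 16 == 8 frequency patches per step
hop_length = 44_100 // 86                     # == 512 samples between spectrogram frames
num_time_frames = 100                         # hypothetical spectrogram length
num_patches = ceil(num_time_frames / patch_size[0]) * freq_len
assert (freq_len, hop_length, num_patches) == (8, 512, 56)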
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    '''simple docstring'''
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    '''simple docstring'''
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )
def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    '''simple docstring'''
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    '''simple docstring'''
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    '''simple docstring'''
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    '''simple docstring'''
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
| 17 |
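# Editor's note (an addition): a numeric sanity check of Graham's law as coded
# above, rate_1 / rate_2 = sqrt(M_2 / M_1). Hydrogen (M ~ 2.016 g/mol) should
# effuse roughly four times faster than oxygen (M ~ 32.00 g/mol).
from math import sqrt

ratio = sqrt(32.00 / 2.016)
assert abs(ratio - 3.98) < 0.01  # i.e. effusion_ratio(2.016, 32.00) with the naming fixed above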
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    """tensor(bool)""": np.bool_,
    """tensor(int8)""": np.int8,
    """tensor(uint8)""": np.uint8,
    """tensor(int16)""": np.int16,
    """tensor(uint16)""": np.uint16,
    """tensor(int32)""": np.int32,
    """tensor(uint32)""": np.uint32,
    """tensor(int64)""": np.int64,
    """tensor(uint64)""": np.uint64,
    """tensor(float16)""": np.float16,
    """tensor(float)""": np.float32,
    """tensor(double)""": np.float64,
}
class OnnxRuntimeModel :
    def __init__(self , model=None , **kwargs ) -> List[str]:
        '''simple docstring'''
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir" , None )
        self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )
    def __call__(self , **kwargs ) -> List[str]:
        '''simple docstring'''
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model(path : Union[str, Path] , provider : Optional[str]=None , sess_options : Optional["ort.SessionOptions"]=None ) -> Any:
        '''simple docstring'''
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained(self , save_directory : Union[str, Path] , file_name : Optional[str] = None , **kwargs ) -> List[Any]:
        '''simple docstring'''
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained(self , save_directory : Union[str, os.PathLike] , **kwargs , ) -> str:
        '''simple docstring'''
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained(cls , model_id : Union[str, Path] , use_auth_token : Optional[Union[bool, str, None]] = None , revision : Optional[Union[str, None]] = None , force_download : bool = False , cache_dir : Optional[str] = None , file_name : Optional[str] = None , provider : Optional[str] = None , sess_options : Optional["ort.SessionOptions"] = None , **kwargs , ) -> Tuple:
        '''simple docstring'''
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
    @classmethod
    def from_pretrained(cls , model_id : Union[str, Path] , force_download : bool = True , use_auth_token : Optional[str] = None , cache_dir : Optional[str] = None , **model_kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        revision = None
        if len(str(model_id ).split("@" ) ) == 2:
            model_id , revision = model_id.split("@" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
| 59 | 0 |
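# Editor's sketch (an addition): minimal inference against the wrapper above.
# The local path and the input name `sample` are placeholders; every keyword
# argument is forwarded to onnxruntime as a named input tensor, so names must
# match the exported graph's inputs.
import numpy as np

model = OnnxRuntimeModel.from_pretrained("./onnx_model", provider="CPUExecutionProvider")
outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
print([np.asarray(o).shape for o in outputs])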
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : Any = StableDiffusionPanoramaPipeline
a__ : Any = TEXT_TO_IMAGE_PARAMS
a__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str]=0 ) -> List[str]:
UpperCAmelCase_= torch.manual_seed(__UpperCAmelCase )
UpperCAmelCase_= {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
UpperCAmelCase_= sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= sd_pipe(**__UpperCAmelCase ).images
UpperCAmelCase_= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
UpperCAmelCase_= sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= """french fries"""
UpperCAmelCase_= sd_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
UpperCAmelCase_= output.images
UpperCAmelCase_= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
UpperCAmelCase_= sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= sd_pipe(**__UpperCAmelCase , view_batch_size=2 )
UpperCAmelCase_= output.images
UpperCAmelCase_= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
UpperCAmelCase_= StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
UpperCAmelCase_= sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= sd_pipe(**__UpperCAmelCase ).images
UpperCAmelCase_= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=__UpperCAmelCase )
UpperCAmelCase_= StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
UpperCAmelCase_= sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= sd_pipe(**__UpperCAmelCase ).images
UpperCAmelCase_= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : int=0 ) -> Any:
UpperCAmelCase_= torch.manual_seed(__UpperCAmelCase )
UpperCAmelCase_= {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
UpperCAmelCase_= """stabilityai/stable-diffusion-2-base"""
UpperCAmelCase_= DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="""scheduler""" )
UpperCAmelCase_= StableDiffusionPanoramaPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_= self.get_inputs()
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
UpperCAmelCase_= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
UpperCAmelCase_= np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_= StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=__UpperCAmelCase )
UpperCAmelCase_= LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_= self.get_inputs()
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
UpperCAmelCase_= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
UpperCAmelCase_= np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_= 0
def callback_fn(__UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor ) -> None:
UpperCAmelCase_= True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_= latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase_= latents[0, -3:, -3:, -1]
UpperCAmelCase_= np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCAmelCase_= latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase_= latents[0, -3:, -3:, -1]
UpperCAmelCase_= np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCAmelCase_= False
UpperCAmelCase_= """stabilityai/stable-diffusion-2-base"""
UpperCAmelCase_= DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="""scheduler""" )
UpperCAmelCase_= StableDiffusionPanoramaPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_= self.get_inputs()
pipe(**__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_= """stabilityai/stable-diffusion-2-base"""
UpperCAmelCase_= DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="""scheduler""" )
UpperCAmelCase_= StableDiffusionPanoramaPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_= self.get_inputs()
UpperCAmelCase_= pipe(**__UpperCAmelCase )
UpperCAmelCase_= torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 277 |
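# Editor's sketch (an addition): the end-to-end call the slow tests above
# exercise, with the checkpoint id taken from the tests themselves (requires a
# GPU and downloaded weights):
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, safety_checker=None
).to("cuda")
# Panorama generation denoises overlapping views, hence the wide 512x2048
# output shape asserted in the tests above.
image = pipe("a photo of the dolomites", num_inference_steps=20).images[0]
image.save("dolomites_panorama.png")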
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowercase :
"""simple docstring"""
def __init__( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : List[Any]=13 , __UpperCAmelCase : Dict=7 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Dict=99 , __UpperCAmelCase : Union[str, Any]=64 , __UpperCAmelCase : Dict=5 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=37 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Union[str, Any]=512 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : str=None , ) -> str:
UpperCAmelCase_= parent
UpperCAmelCase_= batch_size
UpperCAmelCase_= seq_length
UpperCAmelCase_= is_training
UpperCAmelCase_= use_input_mask
UpperCAmelCase_= use_token_type_ids
UpperCAmelCase_= use_labels
UpperCAmelCase_= vocab_size
UpperCAmelCase_= hidden_size
UpperCAmelCase_= num_hidden_layers
UpperCAmelCase_= num_attention_heads
UpperCAmelCase_= intermediate_size
UpperCAmelCase_= hidden_act
UpperCAmelCase_= hidden_dropout_prob
UpperCAmelCase_= attention_probs_dropout_prob
UpperCAmelCase_= max_position_embeddings
UpperCAmelCase_= type_vocab_size
UpperCAmelCase_= type_sequence_label_size
UpperCAmelCase_= initializer_range
UpperCAmelCase_= num_labels
UpperCAmelCase_= num_choices
UpperCAmelCase_= scope
UpperCAmelCase_= vocab_size - 1
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_= None
if self.use_input_mask:
UpperCAmelCase_= random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_= None
if self.use_labels:
UpperCAmelCase_= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_= self.get_config()
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.prepare_config_and_inputs()
UpperCAmelCase_= True
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_= GPTNeoXModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
UpperCAmelCase_= model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) -> Dict:
UpperCAmelCase_= True
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> int:
UpperCAmelCase_= GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase_= self.num_labels
UpperCAmelCase_= GPTNeoXForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
UpperCAmelCase_= self.num_labels
UpperCAmelCase_= GPTNeoXForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase_= self.num_labels
UpperCAmelCase_= GPTNeoXForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : int ) -> Optional[int]:
UpperCAmelCase_= True
UpperCAmelCase_= GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
UpperCAmelCase_= outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_= ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_= ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_= torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_= torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_= model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
UpperCAmelCase_= output_from_no_past["""hidden_states"""][0]
UpperCAmelCase_= model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase_= ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_= output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_= output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
UpperCAmelCase_= self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= config_and_inputs
UpperCAmelCase_= {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : Union[str, Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
a__ : Any = (GPTNeoXForCausalLM,) if is_torch_available() else ()
a__ : str = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Optional[int] = False
a__ : Tuple = False
a__ : int = False
a__ : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
UpperCAmelCase_= GPTNeoXModelTester(self )
UpperCAmelCase_= ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=64 , num_attention_heads=8 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase_= None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Any ) -> Dict:
UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_= ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_= ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
UpperCAmelCase_= original_model(__UpperCAmelCase ).last_hidden_state
UpperCAmelCase_= original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_= {"""type""": scaling_type, """factor""": 10.0}
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
UpperCAmelCase_= scaled_model(__UpperCAmelCase ).last_hidden_state
UpperCAmelCase_= scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
@require_torch
class lowercase ( unittest.TestCase):
"""simple docstring"""
    @slow
    def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
| 277 | 1 |
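# Editor's sketch (an addition): the `rope_scaling` knob the parameterized test
# above exercises, stated standalone. The tiny config values are illustrative,
# not taken from the test.
from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(
    vocab_size=512, hidden_size=64, intermediate_size=128,
    num_hidden_layers=2, num_attention_heads=8,
)
config.rope_scaling = {"type": "dynamic", "factor": 10.0}
model = GPTNeoXModel(config)
# Per the test: "dynamic" leaves inputs at or below max_position_embeddings
# untouched and only rescales RoPE for longer sequences, while "linear"
# changes outputs for short inputs as well.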
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def UpperCAmelCase ( self : List[Any] ) -> Any:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
    def UpperCAmelCase ( self : int ) -> int:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 322 |
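# Editor's sketch (an addition): the pipeline above routes sampling through
# k-diffusion samplers selected by name; this compresses what each slow test
# does (GPU and downloaded weights required):
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base"
).to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")  # any sampler name exposed by k-diffusion
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=15,
    use_karras_sigmas=True,
).images[0]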
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 323 | 0 |
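# Editor's sketch (an addition): the three-step denoising loop the scheduler
# tests spell out (1. scale input, 2. run the model, 3. step). The zero
# "model output" below is a stand-in so the loop runs without real weights:
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)  # 1. scale model input
    model_output = torch.zeros_like(scaled)          # 2. stand-in denoiser output
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample  # 3. x_t -> x_{t-1}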
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p : p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir, metric ):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback( metric, patience ):
    return EarlyStopping(
        monitor=F"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 353 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1_103)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __UpperCAmelCase  # alias for the literal dict defined above, kept verbatim
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1_000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_string).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1],
        )
| 37 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
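
# --- Added usage sketch (not part of the original test file): a minimal,
# hedged standalone example of FlaxTopKLogitsWarper outside the test harness.
# It keeps the 2 largest logits and sets the rest to -inf, mirroring how the
# warpers are called above.
if __name__ == "__main__" and is_flax_available():
    _logits = jnp.array([[1.0, 3.0, 2.0, 0.5]])
    _warped = FlaxTopKLogitsWarper(top_k=2)(None, _logits, cur_len=None)
    assert jnp.isinf(_warped).sum() == 2  # two of the four scores were filtered out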
| 40 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
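
# --- Added reference sketch (not part of the original test file): a hedged,
# self-contained version of the 0/1 knapsack recursion these tests exercise,
# mirroring the `knapsack.knapsack(capacity, weights, values, counter)`
# signature used above.
def _reference_knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    # item doesn't fit: skip it
    if weights[counter - 1] > capacity:
        return _reference_knapsack(capacity, weights, values, counter - 1)
    # otherwise take the better of leaving the item out or putting it in
    return max(
        _reference_knapsack(capacity, weights, values, counter - 1),
        values[counter - 1] + _reference_knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
    )


assert _reference_knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220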
if __name__ == "__main__":
unittest.main()
| 40 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
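
# --- Added usage sketch (not part of the original module): a hedged, minimal
# concrete command showing how subclasses implement the two abstract hooks.
# `EnvCommand` here is illustrative, not the real CLI command.
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # in the real CLI, `parser` is a subparser; here we just record a
        # default factory so `args.func(args)` can build the command instance
        parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info would be collected here")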
| 266 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
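
# --- Added check (not part of the original script): ucal(2.5, 3) expands to
# 2.5 * (2.5 - 1) * (2.5 - 2) = 1.875.
assert ucal(2.5, 3) == 2.5 * 1.5 * 0.5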
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 266 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
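
# --- Added usage sketch (not part of the original module): a hedged example of
# driving the processor end to end. The checkpoint name is the commonly used
# "Salesforce/blip-image-captioning-base"; running this requires network access.
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="a photography of", return_tensors="pt")
    print(sorted(inputs.keys()))  # pixel_values plus the tokenizer outputs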
| 146 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    return parser.parse_args()
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
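
# --- Added check (not part of the original script): the shard-name convention
# encodes the sample count after the second dash, so two hypothetical shards
# holding 1000 and 500 samples count up to 1500.
assert count_samples(["gs://bucket/train-00000-1000.tfrecord", "gs://bucket/train-00001-500.tfrecord"]) == 1500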
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__UpperCamelCase : Dict = parse_args()
main(args)
| 146 | 1 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with `vertices_number` vertices; each possible edge
    is added independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) undirected graph."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
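
    # --- Added check (not part of the original module): with a fixed seed the
    # generator is deterministic, and probability 1 degenerates to the
    # complete graph.
    random.seed(0)
    demo = random_graph(4, 0.5)
    assert all(i not in demo[i] for i in demo)  # no self-loops by construction
    assert random_graph(3, 1) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}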
| 367 |
"""simple docstring"""
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 175 | 0 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (preemptive shortest job first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes

    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
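
# --- Added check (not part of the original script): three processes with
# arrival times [0, 1, 2] and burst times [3, 1, 2] yield waiting times
# [1, 0, 2] under preemptive shortest-job-first.
assert calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) == [1, 0, 2]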
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average of the waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
A__ = int(input())
A__ = [0] * no_of_processes
A__ = [0] * no_of_processes
A__ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
A__ , A__ = map(int, input().split())
A__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
A__ = burst_time
A__ = no_of_processes
A__ = waiting_time
A__ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
A__ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 82 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
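
# --- Added check (not part of the original script): the split keeps acronym
# prefixes together, e.g. "TFBertModel" -> ["TF", "Bert", "Model"].
assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]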
def get_frameworks_table() -> pd.DataFrame:
    """Generate a dataframe flagging, for each model type, its PT/TF/Flax backends and preprocessor class."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = sorted(all_models)

    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each model type has a preprocessor class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'

    data['processor'] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table: dict) -> dict:
    """Update the mapping of model class to (pipeline tag, auto class) with the current auto mappings."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        auto_classes = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token: str, commit_sha: str):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))

        if commit_sha is not None:
            commit_message = (
                F"Update with commit {commit_sha}\n\nSee: "
                F"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = 'Update'

        upload_folder(
            repo_id='huggingface/transformers-metadata',
            folder_path=tmp_dir,
            repo_type='dataset',
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every supported pipeline task is covered by PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 304 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
    for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ), 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small' ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge' ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
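    # Example invocation (a sketch; the script filename is illustrative, the flags are
    # the ones defined above):
    #   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base-patch16-224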
| 207 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
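# Consumption sketch (standard _LazyModule behaviour, assuming torch is installed):
#   from transformers import FNetModel   # attribute resolves lazily via _import_structure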
| 207 | 1 |
"""simple docstring"""
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
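# The expected mappings below mirror the classes defined in tests/models/bert and
# tests/models/blip; results are passed through get_test_info.to_json before assertEqual
# so the structures compare in a stable, serializable form.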
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = get_test_to_tester_mapping(_A )
_a = get_test_to_tester_mapping(_A )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def _UpperCAmelCase ( self ) -> Dict:
_a = get_model_to_test_mapping(_A )
_a = get_model_to_test_mapping(_A )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = get_model_to_tester_mapping(_A )
_a = get_model_to_tester_mapping(_A )
_a = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
        self.assertEqual(get_test_info.to_json(_A ) , _A )
| 320 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''Speech2TextFeatureExtractor'''
    tokenizer_class = '''Speech2TextTokenizer'''
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        '''simple docstring'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
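# Minimal usage sketch (checkpoint id illustrative; expects a 16 kHz mono waveform array):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcription").input_ids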
| 257 | 0 |
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''
    def __init__( self , size ):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left( self , idx ):
        '''simple docstring'''
        return idx * 2
    def right( self , idx ):
        '''simple docstring'''
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a ):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx , left_element , right_element , a , b , val ):
        '''simple docstring'''
        # push any pending lazy assignment down to the children before recursing
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx , left_element , right_element , a , b ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ):
        '''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 363 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr : list[float] ) -> list[float]:
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast( arr : list[float] ) -> list[float]:
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element( arr : list[float] ) -> list[float]:
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
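# Complexity note: the two nested-loop variants above are O(n^2); the stack-based
# next_greatest_element is O(n), since each element is pushed and popped at most once.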
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
    )
| 130 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = "hf-internal-testing/tiny-random-t5"
A__ = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
A__ = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
A__ = tokenizer('''This is me''',return_tensors='''pt''' )
A__ = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
A__ = model.generate(**lowerCAmelCase__ )
A__ = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
A__ = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
A__ = model_reloaded.generate(**lowerCAmelCase__ )
self.assertTrue(torch.allclose(lowerCAmelCase__,lowerCAmelCase__ ) )
def UpperCamelCase ( self ):
A__ = "hf-internal-testing/tiny-random-t5"
A__ = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
A__ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCAmelCase__ ):
model.save_pretrained(lowerCAmelCase__ )
A__ = model.reverse_bettertransformer()
model.save_pretrained(lowerCAmelCase__ )
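        # The round-trip exercised by these tests, outside unittest (a sketch; requires `optimum`):
        #   model = AutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
        #   model = model.to_bettertransformer()        # swap in BetterTransformer fast paths
        #   model = model.reverse_bettertransformer()   # restore vanilla modules before save_pretrained()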
| 193 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ) -> Optional[int]:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
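# Sequence layouts produced by the methods above:
#   single sequence: <s> tokens_0 </s>                (token_type_ids all 0)
#   sentence pair:   <s> tokens_0 </s> tokens_1 </s>  (token_type_ids 0s then 1s)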
| 105 | 0 |
"""simple docstring"""
def nor_gate( input_a , input_b ) ->int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main( ) ->None:
    """simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 303 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    # Pairwise scan over initializers: byte-equal tensors are grouped so every
    # duplicate can later be replaced by a reference to its first occurrence.
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
| 303 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ) -> Tuple:
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
def lowercase_ ( self : Tuple ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    '''simple docstring'''
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image = Image.open(dataset[0]['''file'''] )
    map = Image.open(dataset[1]['''file'''] )
    return image, map
def prepare_semantic_batch_inputs():
    '''simple docstring'''
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image_a = Image.open(ds[0]['''file'''] )
    map_a = Image.open(ds[1]['''file'''] )
    image_b = Image.open(ds[2]['''file'''] )
    map_b = Image.open(ds[3]['''file'''] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class BeitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = BeitImageProcessor if is_vision_available() else None
def lowercase_ ( self : str ) -> Optional[int]:
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
def lowercase_ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
pass
def lowercase_ ( self : Tuple ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : int ) -> Tuple:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : List[Any] ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
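        # With do_reduce_labels=True the background class 0 becomes the ignore index 255
        # and the remaining labels shift down by one, hence the wider 0..255 range asserted here.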
| 314 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) -> Dict:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
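    # Instantiation sketch (values mirror the defaults above):
    #   config = TrajectoryTransformerConfig(action_dim=6, observation_dim=17)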
| 314 | 1 |
import re
def split_input( str_ ) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(R'[^ a-z A-Z 0-9 \s]' , str_ )]
def to_simple_case( str_ ) -> str:
    '''simple docstring'''
    string_split = split_input(str_ )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case( text , upper , separator ) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case( text ) -> str:
    '''simple docstring'''
    return to_simple_case(text )
def to_camel_case( text ) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case( text , upper ) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '_' )
def to_kebab_case( text , upper ) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '-' )
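# Illustrative behaviour of the converters above (hypothetical inputs):
#   to_pascal_case("one two 31235three4four")        -> "OneTwo31235three4four"
#   to_camel_case("one two 31235three4four")         -> "oneTwo31235three4four"
#   to_snake_case("one two 31235three4four", True)   -> "ONE_TWO_31235THREE4FOUR"
#   to_kebab_case("one two 31235three4four", False)  -> "one-two-31235three4four"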
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 304 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class TvltFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ['''audio_values''', '''audio_mask''']
    def __init__(self , spectrogram_length=2_0_4_8 , num_channels=1 , patch_size=[1_6, 1_6] , feature_size=1_2_8 , sampling_rate=4_4_1_0_0 , hop_length_to_sampling_rate=8_6 , n_fft=2_0_4_8 , padding_value=0.0 , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , ).T
    def _np_extract_fbank_features(self , waveform ):
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        # shift/clip normalization: (log_spec - 20) / 40 clipped to [-2, 0], then +1,
        # so returned values land in [-1.0, 1.0]
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__(self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 304 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A__ : Optional[Any] = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor( LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs):
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs)
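# Migration sketch: this shim keeps the old name as a thin alias, so swapping to
# LayoutLMvaImageProcessor(...) is typically a drop-in rename (an assumption worth
# verifying against the release notes rather than a guarantee made by this class).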
| 103 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
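        # With embed_dim=16 the per-stage hidden sizes double each stage (16, 32, 64),
        # matching the channel list asserted above.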
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(__lowerCamelCase):
            # NaN != NaN, so this mask selects exactly the NaN entries
            __lowerCamelCase[__lowerCamelCase != __lowerCamelCase] = 0
            return __lowerCamelCase
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
            self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
        self.assertEqual(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                h_batch_size , _ , h_n_channels = hidden_state.shape
                self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
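
# A minimal, hedged usage sketch for the backbone pattern exercised above. The
# class names come from the imports these tests rely on; the input size and the
# out_features values are illustrative, not the model defaults.
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
backbone = MaskFormerSwinBackbone(config=config)
backbone.eval()
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
# one feature map per requested stage; channel counts are exposed via backbone.channels
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
    assert feature_map.shape[1] == n_channels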
| 11 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : Optional[int] = '''CompVis/stable-diffusion-v1-1'''
a__ : Dict = '''CompVis/stable-diffusion-v1-2'''
a__ : int = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->Dict:
        super().__init__()
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , requires_safety_checker=SCREAMING_SNAKE_CASE_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self ) ->Tuple:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[int]:
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[int]:
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->int:
SCREAMING_SNAKE_CASE : Optional[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(SCREAMING_SNAKE_CASE_ )
        # Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
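
# Hedged sketch of the fan-out this pipeline implements: the same prompt is run
# through each checkpoint and the first image of every result is collected. The
# checkpoint ids match the constants at the top of this snippet; the prompt,
# dtype, and device are illustrative.
import torch
from diffusers import StableDiffusionPipeline

checkpoints = [
    "CompVis/stable-diffusion-v1-1",
    "CompVis/stable-diffusion-v1-2",
    "CompVis/stable-diffusion-v1-3",
    "CompVis/stable-diffusion-v1-4",
]
images = []
for ckpt in checkpoints:
    pipe = StableDiffusionPipeline.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda")
    images.append(pipe("an astronaut riding a horse", num_inference_steps=25).images[0])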
| 364 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : Tuple = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
a__ : Optional[Any] = {'''mobilebert-uncased''': 512}
a__ : List[Any] = {}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[int] = MobileBertTokenizer
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase="[UNK]" , _lowerCamelCase="[SEP]" , _lowerCamelCase="[PAD]" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ) ->Optional[int]:
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowerCamelCase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_lowerCamelCase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
SCREAMING_SNAKE_CASE : Optional[int] = strip_accents
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = do_lower_case
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
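
# Minimal usage sketch for the fast tokenizer defined above; the checkpoint name
# matches PRETRAINED_VOCAB_FILES_MAP in this snippet.
from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
encoding = tokenizer("hello world", "second segment")
print(encoding["input_ids"])       # [CLS] first segment [SEP] second segment [SEP]
print(encoding["token_type_ids"])  # 0s for the first segment and its [SEP], 1s for the second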
| 19 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __A ( metaclass=UpperCamelCase__ ):
a__ : List[str] = ["""onnx"""]
def __init__(self : List[Any] , *__a : Dict , **__a : Optional[Any] ):
requires_backends(self , ["onnx"] )
@classmethod
def _lowercase (cls : List[str] , *__a : Any , **__a : List[Any] ):
requires_backends(cls , ["onnx"] )
@classmethod
def _lowercase (cls : Optional[int] , *__a : Any , **__a : int ):
requires_backends(cls , ["onnx"] )
| 1 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowercase( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[int] = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Optional[int] = """this is a test"""
UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = AlbertTokenizer(A )
UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType ( ExplicitEnum ):
    """simple docstring"""

    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''

__A = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''image_processor''', '''char_tokenizer''']
snake_case_ = '''ViTImageProcessor'''
snake_case_ = '''MgpstrTokenizer'''
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCamelCase__ , )
__lowerCamelCase = kwargs.pop('feature_extractor' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained('gpt2' )
__lowerCamelCase = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCamelCase = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
__lowerCamelCase = self.char_tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase = encodings['input_ids']
return inputs
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = sequences
__lowerCamelCase = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'char' )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'bpe' )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'wp' )
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(lowerCamelCase__ ):
__lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase = scores.index(max(lowerCamelCase__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase = {}
__lowerCamelCase = final_strs
__lowerCamelCase = final_scores
__lowerCamelCase = char_strs
__lowerCamelCase = bpe_strs
__lowerCamelCase = wp_strs
return out
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
__lowerCamelCase = self.char_decode
__lowerCamelCase = 1
__lowerCamelCase = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase = self.bpe_decode
__lowerCamelCase = 2
__lowerCamelCase = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase = self.wp_decode
__lowerCamelCase = 102
__lowerCamelCase = '[SEP]'
else:
raise ValueError(f"""Format {format} is not supported.""" )
__lowerCamelCase , __lowerCamelCase = [], []
__lowerCamelCase = pred_logits.size(0 )
__lowerCamelCase = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase__ , sorted=lowerCamelCase__ )
__lowerCamelCase = preds_index.view(-1 , lowerCamelCase__ )[:, 1:]
__lowerCamelCase = decoder(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = torch.nn.functional.softmax(lowerCamelCase__ , dim=2 ).max(dim=2 )
__lowerCamelCase = preds_max_prob[:, 1:]
for index in range(lowerCamelCase__ ):
__lowerCamelCase = preds_str[index].find(lowerCamelCase__ )
__lowerCamelCase = preds_str[index][:pred_eos]
__lowerCamelCase = preds_index[index].cpu().tolist()
__lowerCamelCase = pred_index.index(lowerCamelCase__ ) if eos_token in pred_index else -1
__lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCamelCase__ )
conf_scores.append(lowerCamelCase__ )
return dec_strs, conf_scores
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase__ )]
return decode_strs
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase__ )]
return decode_strs
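
# Hedged end-to-end sketch of the three-head decoding implemented above,
# assuming the public MGP-STR checkpoint id on the Hub; the image URL is
# illustrative.
import requests
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode picks, per sample, the head (char/bpe/wp) with the highest confidence
decoded = processor.batch_decode(outputs.logits)
print(decoded["generated_text"])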
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
__lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
__lowerCamelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCamelCase = [4, 4, 4, 4]
__lowerCamelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
else:
__lowerCamelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCamelCase = 96
elif "small" in model_name:
__lowerCamelCase = 96
elif "base" in model_name:
__lowerCamelCase = 128
elif "large" in model_name:
__lowerCamelCase = 192
elif "xlarge" in model_name:
__lowerCamelCase = 256
elif "huge" in model_name:
__lowerCamelCase = 352
# set label information
__lowerCamelCase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowerCamelCase = 'imagenet-22k-id2label.json'
else:
__lowerCamelCase = 'imagenet-1k-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict:
"""simple docstring"""
__lowerCamelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowerCamelCase = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
__lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
__lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = BitImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
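
# Example invocation of the conversion script above (the file name and output
# path are illustrative):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted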
| 348 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : int, _snake_case : str, _snake_case : int=1_3, _snake_case : List[str]=3_2, _snake_case : Any=3, _snake_case : Optional[int]=4, _snake_case : List[Any]=[1_0, 2_0, 3_0, 4_0], _snake_case : Optional[int]=[2, 2, 3, 2], _snake_case : Any=True, _snake_case : Union[str, Any]=True, _snake_case : Any=3_7, _snake_case : str="gelu", _snake_case : List[Any]=1_0, _snake_case : Optional[Any]=0.0_2, _snake_case : str=["stage2", "stage3", "stage4"], _snake_case : Tuple=[2, 3, 4], _snake_case : Dict=None, ) ->Optional[Any]:
snake_case__ : int = parent
snake_case__ : List[Any] = batch_size
snake_case__ : Dict = image_size
snake_case__ : Tuple = num_channels
snake_case__ : str = num_stages
snake_case__ : Any = hidden_sizes
snake_case__ : Any = depths
snake_case__ : int = is_training
snake_case__ : Any = use_labels
snake_case__ : Any = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : List[Any] = num_labels
snake_case__ : Dict = initializer_range
snake_case__ : Dict = out_features
snake_case__ : Optional[int] = out_indices
snake_case__ : Optional[Any] = scope
def lowercase_ ( self : int ) ->int:
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : List[Any] = ids_tensor([self.batch_size], self.num_labels )
snake_case__ : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Optional[int] ) ->Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=_snake_case, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def lowercase_ ( self : Any, _snake_case : Optional[Any], _snake_case : Optional[int], _snake_case : Optional[Any] ) ->Dict:
snake_case__ : Union[str, Any] = ConvNextModel(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : List[Any] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )
def lowercase_ ( self : Tuple, _snake_case : str, _snake_case : Dict, _snake_case : Optional[int] ) ->List[Any]:
snake_case__ : Optional[int] = ConvNextForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Tuple = model(_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict, _snake_case : Dict, _snake_case : Any, _snake_case : Dict ) ->Optional[int]:
snake_case__ : Dict = ConvNextBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[int] = model(_snake_case )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ : str = None
snake_case__ : Optional[Any] = ConvNextBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Dict = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def lowercase_ ( self : List[str] ) ->Optional[Any]:
snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = config_and_inputs
snake_case__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def lowercase_ ( self : Tuple ) ->str:
snake_case__ : List[Any] = ConvNextModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self, config_class=_snake_case, has_text_modality=_snake_case, hidden_size=3_7 )
def lowercase_ ( self : Union[str, Any] ) ->int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : Any ) ->Tuple:
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def lowercase_ ( self : List[str] ) ->Union[str, Any]:
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]:
pass
def lowercase_ ( self : Union[str, Any] ) ->Optional[int]:
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = model_class(_snake_case )
snake_case__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : int = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], _snake_case )
def lowercase_ ( self : Tuple ) ->Optional[Any]:
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowercase_ ( self : Dict ) ->str:
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_snake_case )
def lowercase_ ( self : Dict ) ->Any:
def check_hidden_states_output(_snake_case : Optional[Any], _snake_case : int, _snake_case : Union[str, Any] ):
snake_case__ : Union[str, Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
snake_case__ : Tuple = model(**self._prepare_for_class(_snake_case, _snake_case ) )
snake_case__ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(_snake_case, _snake_case, _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Optional[Any] = True
check_hidden_states_output(_snake_case, _snake_case, _snake_case )
def lowercase_ ( self : Any ) ->Any:
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def lowercase_ ( self : Tuple ) ->Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Dict = ConvNextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowercase_ ():
snake_case__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : Optional[int] ) ->Any:
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Union[str, Any] ) ->Any:
snake_case__ : Tuple = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(_snake_case )
snake_case__ : Optional[int] = self.default_image_processor
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : int = image_processor(images=_snake_case, return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**_snake_case )
# verify the logits
snake_case__ : Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, _snake_case )
snake_case__ : Dict = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _snake_case, atol=1e-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase , lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = ConvNextConfig
_SCREAMING_SNAKE_CASE = False
def lowercase_ ( self : Tuple ) ->Tuple:
snake_case__ : List[Any] = ConvNextModelTester(self )
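
# Hedged inference sketch mirroring the integration test above; the checkpoint
# matches the one used in the test, and the image URL points at the same COCO
# sample as the local fixture (URL is illustrative).
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])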
| 277 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ (A : str , A : List[Any] , A : Any ):
# Initialise PyTorch model
snake_case__ : List[Any] = LxmertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case__ : List[str] = LxmertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
a_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
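
# Example invocation of the conversion script above (the file name and paths
# are illustrative):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin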
| 277 | 1 |
def lowercase( UpperCamelCase_ ) -> int:
    '''simple docstring'''
    if not isinstance(UpperCamelCase_ , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    elif UpperCamelCase_ < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return bin(UpperCamelCase_ ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
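
# A common alternative to counting '1' characters in bin(n) is Kernighan's
# trick: n & (n - 1) clears the lowest set bit, so the loop runs once per set
# bit. A minimal sketch (the function name is ours, not from the snippet above):
def count_set_bits_kernighan(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count

assert count_set_bits_kernighan(0b1011) == 3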
| 165 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE_ ( DiffusionPipeline ):
    def __init__( self : Dict , unet : Tuple , scheduler : Optional[Any] ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self : Union[str, Any] , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """simple docstring"""
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
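# A minimal usage sketch for the pipeline above (the checkpoint id and variable
# names are illustrative assumptions, not taken from this file):
#
#   from diffusers import UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = SCREAMING_SNAKE_CASE_(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]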
| 165 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file( config_path : Tuple ) -> str:
    print('''Loading config file...''' )
    def flatten_yaml_as_dict(d : Optional[int] , parent_key : Optional[int]="" , sep : Tuple="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path , '''r''' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('''Error while loading config file: {}. Error message: {}'''.format(config_path , str(exc ) ) )
    return config
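# For reference, the nested->flat key transformation above behaves like this
# (illustrative values): flatten_yaml_as_dict({"model": {"name": "mobilevit_v2"}})
# returns {"model.name": "mobilevit_v2"}; the helper is local to load_orig_config_file.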
def get_mobilevitva_config( task_name : str , orig_cfg_file : Tuple ) -> Dict:
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('''imagenet1k_''' ):
        config.num_labels = 1000
        if int(task_name.strip().split('''_''' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = '''imagenet-1k-id2label.json'''
    elif task_name.startswith('''imagenet21k_to_1k_''' ):
        config.num_labels = 21000
        if int(task_name.strip().split('''_''' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = '''imagenet-22k-id2label.json'''
    elif task_name.startswith('''ade20k_''' ):
        config.num_labels = 151
        config.image_size = 512
        filename = '''ade20k-id2label.json'''
        is_segmentation_model = True
    elif task_name.startswith('''voc_''' ):
        config.num_labels = 21
        config.image_size = 512
        filename = '''pascal-voc-id2label.json'''
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , '''model.classification.mitv2.width_multiplier''' , 1.0 )
    assert (
        getattr(orig_config , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , '''model.classification.activation.name''' , '''swish''' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , '''model.segmentation.output_stride''' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
    # id2label
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( dct : Tuple , old : List[str] , new : List[Any] ) -> Optional[int]:
    val = dct.pop(old )
    dct[new] = val
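# rename_key mutates the mapping in place: given d = {"old": 1}, calling
# rename_key(d, "old", "new") leaves d == {"new": 1} (illustrative example).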
def create_rename_keys( state_dict : Optional[int] , base_model : Tuple=False ) -> int:
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevitv2.'''
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('''.block.''' , '''.''' )
        if ".conv." in k:
            k_new = k_new.replace('''.conv.''' , '''.convolution.''' )
        if ".norm." in k:
            k_new = k_new.replace('''.norm.''' , '''.normalization.''' )
        if "conv_1." in k:
            k_new = k_new.replace('''conv_1.''' , f"""{model_prefix}conv_stem.""" )
        for i in [1, 2]:
            if f"""layer_{i}.""" in k:
                k_new = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
        if ".exp_1x1." in k:
            k_new = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
        if ".red_1x1." in k:
            k_new = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
        for i in [3, 4, 5]:
            if f"""layer_{i}.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
            if f"""layer_{i}.1.local_rep.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
            if f"""layer_{i}.1.local_rep.1.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"""layer_{i}.1.global_rep.{j}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
                if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
            if f"""layer_{i}.1.conv_proj.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
        if "classifier.1." in k:
            k_new = k_new.replace('''classifier.1.''' , '''classifier.''' )
        if "seg_head." in k:
            k_new = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('''.aspp_layer.''' , '''.''' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('''.aspp_pool.''' , '''.''' )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys( state_dict : Tuple ) -> List[str]:
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('''seg_head.aux_head.''' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img( ) -> str:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name : Any , orig_checkpoint_path : List[Any] , orig_config_path : Any , pytorch_dump_folder_path : int ) -> int:
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(orig_checkpoint_path , map_location='''cpu''' )
    # load huggingface model
    if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys in the original state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('''imagenet''' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
        if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
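    # Example invocation (script name and all paths are placeholders):
    #
    #   python convert_mobilevitv2.py --task imagenet1k_256 \
    #       --orig_checkpoint_path mobilevitv2-1.0.pt --orig_config_path mobilevitv2.yaml \
    #       --pytorch_dump_folder_path ./mobilevitv2-imagenet1k-256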
| 24 |
'''simple docstring'''
def is_pangram( input_str = "The quick brown fox jumps over the lazy dog" , ):
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """ , """""" )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str = "The quick brown fox jumps over the lazy dog" , ):
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )
def is_pangram_fastest( input_str = "The quick brown fox jumps over the lazy dog" , ):
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark( ):
    """simple docstring"""
    from timeit import timeit
    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup ) )
    print(timeit("""is_pangram_faster()""" , setup=setup ) )
    print(timeit("""is_pangram_fastest()""" , setup=setup ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
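    # Spot checks: the default sentence is a pangram, a shorter phrase is not.
    assert is_pangram_fastest()
    assert not is_pangram_fastest("hello world")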
| 37 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a: Dict = logging.get_logger(__name__)
__a: int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class UpperCAmelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "roc_bert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs , ) -> Optional[int]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
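# A minimal instantiation sketch for the config above; the argument values shown
# are simply the defaults, and the variable name is illustrative:
#
#   config = UpperCAmelCase(vocab_size=30522, pronunciation_vocab_size=910, shape_vocab_size=24858)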
| 363 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=2 , __lowerCAmelCase=32 , __lowerCAmelCase=16 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=4 , __lowerCAmelCase=[0, 1, 2, 3] , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=[1, 384, 24, 24] , __lowerCAmelCase=True , __lowerCAmelCase=None , ) -> Dict:
lowercase__ : str = parent
lowercase__ : List[Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : Tuple = patch_size
lowercase__ : str = num_channels
lowercase__ : Dict = is_training
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = hidden_size
lowercase__ : int = num_hidden_layers
lowercase__ : int = backbone_out_indices
lowercase__ : List[str] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[Any] = initializer_range
lowercase__ : Optional[int] = num_labels
lowercase__ : Optional[int] = backbone_featmap_shape
lowercase__ : int = scope
lowercase__ : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase__ : List[str] = (image_size // patch_size) ** 2
lowercase__ : Tuple = num_patches + 1
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[int] = DPTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : Union[str, Any] = self.num_labels
lowercase__ : str = DPTForDepthEstimation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
lowercase__ : str = self.num_labels
lowercase__ : Tuple = DPTForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : Dict = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowerCAmelCase( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : str = DPTModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def _lowerCAmelCase( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _lowerCAmelCase( self ) -> Tuple:
pass
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(__lowerCAmelCase )
lowercase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
if model_class in get_values(__lowerCAmelCase ):
continue
lowercase__ : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase__ : Tuple = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase__ : str = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowerCAmelCase( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = False
lowercase__ : str = True
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
lowercase__ : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase__ : List[Any] = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = _config_zero_init(__lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(config=__lowerCAmelCase )
# Skip the check for the backbone
lowercase__ : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase__ : List[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCAmelCase( self ) -> List[str]:
pass
@slow
def _lowerCAmelCase( self ) -> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase__ : Dict = DPTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = '''add'''
with self.assertRaises(__lowerCAmelCase ):
lowercase__ : Tuple = DPTForDepthEstimation(__lowerCAmelCase )
def __UpperCamelCase ( ):
lowercase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Any:
lowercase__ : Optional[int] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
lowercase__ : List[Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__lowerCAmelCase )
lowercase__ : Optional[Any] = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**__lowerCAmelCase )
lowercase__ : str = outputs.predicted_depth
# verify the predicted depth
lowercase__ : Optional[Any] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __lowerCAmelCase )
lowercase__ : str = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __lowerCAmelCase , atol=1E-4 ) )
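# For reference, the depth map verified above can be rendered as a viewable image
# with a short post-processing step (an illustrative sketch, not part of the tests):
#
#   import numpy as np
#   depth = predicted_depth.squeeze().cpu().numpy()
#   depth = (255 * (depth - depth.min()) / (depth.max() - depth.min())).astype("uint8")
#   Image.fromarray(depth)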
| 214 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__a = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :List[Any] = AlbertTokenizer
a :Union[str, Any] = AlbertTokenizerFast
a :str = True
a :Tuple = True
a :Tuple = True
def _lowercase ( self : Tuple ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = AlbertTokenizer(SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> int:
lowercase_ = """this is a test"""
lowercase_ = """this is a test"""
return input_text, output_text
def _lowercase ( self : int ) -> int:
lowercase_ = """<pad>"""
lowercase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Any:
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3_0_0_0_0 )
def _lowercase ( self : Any ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def _lowercase ( self : int ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowercase_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowercase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Any:
lowercase_ = AlbertTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [4_8, 2_5, 2_1, 1_2_8_9] )
lowercase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
lowercase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
lowercase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def _lowercase ( self : Dict ) -> Optional[Any]:
lowercase_ = AlbertTokenizer(SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer.encode('''sequence builders''' )
lowercase_ = tokenizer.encode('''multi-sequence build''' )
lowercase_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase ( self : Optional[int] ) -> Dict:
# fmt: off
lowercase_ = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 30 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Optional[int] = num_choices
UpperCAmelCase : Dict = scope
UpperCAmelCase : Union[str, Any] = vocab_size - 1
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Optional[Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = GPTNeoXModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A )
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> Optional[int]:
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = GPTNeoXModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A ) -> List[str]:
UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[str] = self.num_labels
UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A )
UpperCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase : List[str] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = GPTNeoXModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowercase( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : Any = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
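# The integration test above amounts to this standalone greedy-decoding sketch
# (the model id is taken from the test itself; variable names are illustrative):
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
#   model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
#   inputs = tokenizer("My favorite food is", return_tensors="pt")
#   print(tokenizer.batch_decode(model.generate(**inputs, do_sample=False, max_new_tokens=20))[0])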
| 265 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( lowercase__ ):
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=False , snake_case=True , snake_case="None" , snake_case=3 , snake_case=4 , snake_case=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int =parent
_UpperCAmelCase : str =batch_size
_UpperCAmelCase : Dict =seq_length
_UpperCAmelCase : Optional[int] =is_training
_UpperCAmelCase : Union[str, Any] =use_input_mask
_UpperCAmelCase : Optional[int] =use_token_type_ids
_UpperCAmelCase : Tuple =use_labels
_UpperCAmelCase : Optional[int] =vocab_size
_UpperCAmelCase : str =hidden_size
_UpperCAmelCase : Dict =num_hidden_layers
_UpperCAmelCase : Dict =num_attention_heads
_UpperCAmelCase : List[Any] =intermediate_size
_UpperCAmelCase : int =hidden_act
_UpperCAmelCase : Dict =hidden_dropout_prob
_UpperCAmelCase : str =attention_probs_dropout_prob
_UpperCAmelCase : List[str] =max_position_embeddings
_UpperCAmelCase : str =type_vocab_size
_UpperCAmelCase : Any =type_sequence_label_size
_UpperCAmelCase : Union[str, Any] =initializer_range
_UpperCAmelCase : Union[str, Any] =num_labels
_UpperCAmelCase : List[str] =num_choices
_UpperCAmelCase : Optional[Any] =relative_attention
_UpperCAmelCase : Dict =position_biased_input
_UpperCAmelCase : Optional[int] =pos_att_type
_UpperCAmelCase : List[Any] =scope
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase : int =None
if self.use_input_mask:
_UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
_UpperCAmelCase : Optional[Any] =None
if self.use_token_type_ids:
_UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase : Optional[Any] =None
_UpperCAmelCase : List[Any] =None
_UpperCAmelCase : List[str] =None
if self.use_labels:
_UpperCAmelCase : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase : int =ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase : List[str] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =self.get_config()
_UpperCAmelCase : Optional[int] =3_0_0
return config
def lowerCAmelCase ( self , snake_case) -> int:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size()) , [])
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] =DebertaModel(config=_a)
model.to(_a)
model.eval()
_UpperCAmelCase : Tuple =model(_a , attention_mask=_a , token_type_ids=_a)[0]
_UpperCAmelCase : Optional[int] =model(_a , token_type_ids=_a)[0]
_UpperCAmelCase : Tuple =model(_a)[0]
self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any =DebertaForMaskedLM(config=_a)
model.to(_a)
model.eval()
_UpperCAmelCase : str =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =self.num_labels
_UpperCAmelCase : List[str] =DebertaForSequenceClassification(_a)
model.to(_a)
model.eval()
_UpperCAmelCase : str =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a)
self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
self.check_loss_output(_a)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.num_labels
_UpperCAmelCase : List[str] =DebertaForTokenClassification(config=_a)
model.to(_a)
model.eval()
_UpperCAmelCase : List[str] =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict =DebertaForQuestionAnswering(config=_a)
model.to(_a)
model.eval()
_UpperCAmelCase : int =model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowercase__ ,lowercase__ ,unittest.TestCase ):
UpperCAmelCase =(
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase =(
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase =True
UpperCAmelCase =False
UpperCAmelCase =False
UpperCAmelCase =False
UpperCAmelCase =False
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Any =DebertaModelTester(self)
_UpperCAmelCase : Any =ConfigTester(self , config_class=_a , hidden_size=3_7)
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a)
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a)
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a)
@slow
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Dict =DebertaModel.from_pretrained(_a)
self.assertIsNotNone(_a)
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet')
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =DebertaModel.from_pretrained('microsoft/deberta-base')
_UpperCAmelCase : Optional[int] =torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
_UpperCAmelCase : Optional[Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCAmelCase : Dict =model(_a , attention_mask=_a)[0]
# compare the actual values for a slice.
_UpperCAmelCase : int =torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4) , f"{output[:, 1:4, 1:4]}")
| 355 |
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any( num : int , base : int ):
    '''simple docstring'''
    if isinstance(num , float ):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(base , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if base in (0, 1):
        raise ValueError('base must be >= 2' )
    if base > 36:
        raise ValueError('base must be <= 36' )
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
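A few concrete conversions as a quick illustration (values checked by hand):

assert decimal_to_any(5, 2) == '101'
assert decimal_to_any(255, 16) == 'FF'
assert decimal_to_any(35, 36) == 'Z'
# round-tripping through int() recovers the original number
assert int(decimal_to_any(1000, 7), 7) == 1000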
| 242 | 0 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down until the subtree rooted there is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        # move the current maximum to the end, then restore the heap property
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort: quicksort with a depth cap that falls back to heapsort, plus
    insertion sort for slices at or below the size threshold."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
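The hybrid strategy above is introsort: quicksort does the bulk of the work, the depth cap of 2*ceil(log2(n)) bounds the worst case by switching to heapsort, and slices at or below the size threshold of 16 fall through to insertion sort. A quick demonstration run:

sample = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
print(sort(sample))  # [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]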
| 159 | import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
a_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowercase ( lowerCamelCase : Optional[Any] ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : str ):
return max(metric_fn(lowerCamelCase , lowerCamelCase ) for gt in ground_truths )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Dict ):
UpperCamelCase_ : Tuple = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[Any] = []
if args.gold_data_mode == "qa":
UpperCamelCase_ : Union[str, Any] = pd.read_csv(lowerCamelCase , sep='\t' , header=lowerCamelCase )
for answer_list in data[1]:
UpperCamelCase_ : Optional[int] = ast.literal_eval(lowerCamelCase )
answers.append(lowerCamelCase )
else:
UpperCamelCase_ : int = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : Optional[int] = [[reference] for reference in references]
UpperCamelCase_ : Optional[int] = 0
for prediction, ground_truths in zip(lowerCamelCase , lowerCamelCase ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
fa += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
UpperCamelCase_ : List[Any] = 1_0_0.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : List[str] ):
UpperCamelCase_ : Optional[int] = args.k
UpperCamelCase_ : List[Any] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = 0
for hypo, reference in zip(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_ : List[str] = set(hypo.split('\t' )[:k] )
UpperCamelCase_ : int = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Any ):
def strip_title(lowerCamelCase : List[str] ):
if title.startswith('"' ):
UpperCamelCase_ : List[str] = title[1:]
if title.endswith('"' ):
UpperCamelCase_ : int = title[:-1]
return title
UpperCamelCase_ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase , )['input_ids'].to(args.device )
UpperCamelCase_ : int = rag_model.rag.question_encoder(lowerCamelCase )
UpperCamelCase_ : List[str] = question_enc_outputs[0]
UpperCamelCase_ : Tuple = rag_model.retriever(
lowerCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
UpperCamelCase_ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase_ : int = []
for docs in all_docs:
UpperCamelCase_ : Union[str, Any] = [strip_title(lowerCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(lowerCamelCase ) )
return provenance_strings
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
with torch.no_grad():
UpperCamelCase_ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = inputs_dict.input_ids.to(args.device )
UpperCamelCase_ : str = inputs_dict.attention_mask.to(args.device )
UpperCamelCase_ : List[Any] = rag_model.generate( # rag_model overwrites generate
lowerCamelCase , attention_mask=lowerCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase_ : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
if args.print_predictions:
for q, a in zip(lowerCamelCase , lowerCamelCase ):
logger.info('Q: {} - A: {}'.format(lowerCamelCase , lowerCamelCase ) )
return answers
def __lowercase ( ):
UpperCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=lowerCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=lowerCamelCase , choices=['exact', 'compressed', 'legacy'] , type=lowerCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=lowerCamelCase , type=lowerCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=lowerCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=lowerCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=lowerCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
        '--gold_data_mode' , default='qa' , type=lowerCamelCase , choices=['qa', 'ans'] , help=(
            'Format of the gold data file. '
            'qa - a single line in the following format: question [tab] answer_list. '
            'ans - a single line of the gold file contains the expected answer string.'
        ) , )
parser.add_argument(
'--predictions_path' , type=lowerCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=lowerCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=lowerCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=lowerCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=lowerCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
UpperCamelCase_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Any = {}
if args.model_type is None:
UpperCamelCase_ : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
UpperCamelCase_ : Optional[int] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
UpperCamelCase_ : Dict = args.n_docs
if args.index_name is not None:
UpperCamelCase_ : Union[str, Any] = args.index_name
if args.index_path is not None:
UpperCamelCase_ : str = args.index_path
else:
UpperCamelCase_ : Tuple = BartForConditionalGeneration
UpperCamelCase_ : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , lowerCamelCase )
UpperCamelCase_ : Optional[int] = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
UpperCamelCase_ : Optional[int] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(lowerCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
UpperCamelCase_ : List[str] = RagRetriever.from_pretrained(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_ : List[Any] = model_class.from_pretrained(lowerCamelCase , retriever=lowerCamelCase , **lowerCamelCase )
model.retriever.init_retrieval()
else:
UpperCamelCase_ : Optional[Any] = model_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
UpperCamelCase_ : Optional[Any] = []
for line in tqdm(lowerCamelCase ):
questions.append(line.strip() )
if len(lowerCamelCase ) == args.eval_batch_size:
UpperCamelCase_ : Dict = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) + '\n' )
preds_file.flush()
UpperCamelCase_ : Tuple = []
if len(lowerCamelCase ) > 0:
UpperCamelCase_ : Optional[int] = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) )
preds_file.flush()
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a_ = get_args()
main(args)
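A hypothetical invocation of this evaluation script; the script filename and all paths below are placeholders rather than values from the source, and facebook/rag-token-nq is one published RAG checkpoint:

# python eval_rag.py \
#     --model_name_or_path facebook/rag-token-nq \
#     --model_type rag_token \
#     --evaluation_set data/test.source \
#     --gold_data_path data/gold_data \
#     --predictions_path out/predictions.txt \
#     --eval_mode e2e \
#     --gold_data_mode qa \
#     --n_docs 5 \
#     --num_beams 4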
| 175 | 0 |
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2) scan: for each element, search the remainder of the list."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same O(n^2) idea, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) solution: walk the array right to left, keeping a stack of candidates."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
) | 171 |
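A worked example of the stack-based next_greatest_element: scanning from the right, each element pops smaller-or-equal values off the stack, reads its answer from whatever remains on top, then pushes itself.

print(next_greatest_element([2, 7, 3, 5, 1]))  # [7, -1, 5, -1, -1]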
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Dict = parent
__UpperCamelCase : List[str] = batch_size
__UpperCamelCase : str = seq_length
__UpperCamelCase : List[Any] = is_training
__UpperCamelCase : str = use_input_mask
__UpperCamelCase : int = use_token_type_ids
__UpperCamelCase : str = use_labels
__UpperCamelCase : List[str] = vocab_size
__UpperCamelCase : List[str] = hidden_size
__UpperCamelCase : List[Any] = num_hidden_layers
__UpperCamelCase : Union[str, Any] = num_attention_heads
__UpperCamelCase : Optional[Any] = intermediate_size
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : List[str] = hidden_dropout_prob
__UpperCamelCase : List[Any] = attention_probs_dropout_prob
__UpperCamelCase : List[str] = max_position_embeddings
__UpperCamelCase : Union[str, Any] = type_vocab_size
__UpperCamelCase : Optional[Any] = type_sequence_label_size
__UpperCamelCase : Union[str, Any] = initializer_range
__UpperCamelCase : Union[str, Any] = num_labels
__UpperCamelCase : Any = num_choices
__UpperCamelCase : Optional[Any] = scope
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Tuple = None
if self.use_input_mask:
__UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Optional[int] = None
if self.use_token_type_ids:
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[str] = None
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : int = None
if self.use_labels:
__UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
__UpperCamelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : int = True
__UpperCamelCase : Tuple = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
__UpperCamelCase : Union[str, Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
__UpperCamelCase : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
__UpperCamelCase : Optional[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
__UpperCamelCase : str = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
__UpperCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
__UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
__UpperCamelCase : List[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
__UpperCamelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : List[str] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase : Dict = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase : Tuple = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Tuple = False
lowercase : List[Any] = False
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LlamaModelTester(self )
__UpperCamelCase : List[str] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Optional[int] = 3
__UpperCamelCase : int = input_dict["input_ids"]
__UpperCamelCase : Optional[Any] = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : List[str] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[str] = 3
__UpperCamelCase : Any = "single_label_classification"
__UpperCamelCase : List[str] = input_dict["input_ids"]
__UpperCamelCase : Tuple = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : Optional[int] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Dict = 3
__UpperCamelCase : Tuple = "multi_label_classification"
__UpperCamelCase : Any = input_dict["input_ids"]
__UpperCamelCase : str = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase : Optional[Any] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Any = ids_tensor([1, 10] , config.vocab_size )
__UpperCamelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
__UpperCamelCase : int = original_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : List[Any] = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Dict = {"type": scaling_type, "factor": 10.0}
__UpperCamelCase : Optional[Any] = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
__UpperCamelCase : Optional[int] = scaled_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : Tuple = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__UpperCamelCase : Tuple = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCamelCase : List[str] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__UpperCamelCase : str = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : int = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Any = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__UpperCamelCase : Any = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : Any = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Union[str, Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__UpperCamelCase : Optional[Any] = model(torch.tensor(__UpperCamelCase ) )
__UpperCamelCase : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that "
__UpperCamelCase : Optional[Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__UpperCamelCase : Dict = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
__UpperCamelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
__UpperCamelCase : List[Any] = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase ) | 171 | 1 |
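The parameterized test above exercises both RoPE scaling variants through a config dict. A minimal sketch of constructing such a config, assuming the rope_scaling keyword of recent transformers releases:

from transformers import LlamaConfig

# 'linear' rescales all positions uniformly; 'dynamic' only changes the embeddings
# once an input exceeds the original maximum length, which is why the test expects
# equal outputs for short inputs in the dynamic case.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})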
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE :Any = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[Any] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__SCREAMING_SNAKE_CASE :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
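A stripped-down sketch of the lazy-module pattern this file relies on: the package replaces itself with a module object whose attribute access triggers the real import, so importing the library stays cheap. The class below is illustrative and not the actual _LazyModule implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)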
| 22 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'BlipImageProcessor'
__SCREAMING_SNAKE_CASE : List[Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowercase , lowercase ):
A_ : List[Any] = False
super().__init__(lowercase , lowercase )
A_ : Tuple = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ : Optional[Any] = self.tokenizer
A_ : Tuple = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
A_ : int = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
A_ : Optional[Any] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
A_ : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : int = self.tokenizer.model_input_names
A_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 206 | 0 |
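A hedged usage sketch for a processor of this shape; the checkpoint name is one published BLIP model and is an assumption, not something stated in the code above:

from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (384, 384))  # stand-in for a real photo
# With both inputs, the image-processor output is updated with the text encoding,
# so the batch typically carries pixel_values plus input_ids and attention_mask.
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))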
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase: List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: List[str] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 361 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = CycleDiffusionPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
lowercase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : Any ):
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
a : str = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=10_00 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
a : List[str] = CLIPTextModel(__snake_case )
a : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase_ ( self : Optional[int] , __snake_case : Dict , __snake_case : Any=0 ):
a : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
a : Optional[Any] = image / 2 + 0.5
if str(__snake_case ).startswith('mps' ):
a : List[str] = torch.manual_seed(__snake_case )
else:
a : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : List[Any] = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self : Optional[int] ):
a : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a : int = self.get_dummy_components()
a : str = CycleDiffusionPipeline(**__snake_case )
a : List[str] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a : Dict = self.get_dummy_inputs(__snake_case )
a : Union[str, Any] = pipe(**__snake_case )
a : List[Any] = output.images
a : Optional[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
a : Tuple = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowercase_ ( self : int ):
a : List[Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(__snake_case , 'half' ):
a : Any = module.half()
a : Tuple = CycleDiffusionPipeline(**__snake_case )
a : Any = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a : str = self.get_dummy_inputs(__snake_case )
a : int = pipe(**__snake_case )
a : Optional[int] = output.images
a : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
a : int = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase_ ( self : List[Any] ):
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def lowercase_ ( self : Dict ):
return super().test_inference_batch_single_identical()
@skip_mps
def lowercase_ ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase_ ( self : Dict ):
return super().test_save_load_optional_components()
@skip_mps
def lowercase_ ( self : List[Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
a : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
a : List[str] = init_image.resize((5_12, 5_12) )
a : Dict = 'CompVis/stable-diffusion-v1-4'
a : List[str] = DDIMScheduler.from_pretrained(__snake_case , subfolder='scheduler' )
a : Any = CycleDiffusionPipeline.from_pretrained(
__snake_case , scheduler=__snake_case , safety_checker=__snake_case , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
a : Union[str, Any] = 'A black colored car'
a : Optional[Any] = 'A blue colored car'
a : int = torch.manual_seed(0 )
a : Optional[Any] = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type='np' , )
a : Dict = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def lowercase_ ( self : int ):
a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
a : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
a : str = init_image.resize((5_12, 5_12) )
a : Optional[int] = 'CompVis/stable-diffusion-v1-4'
a : Union[str, Any] = DDIMScheduler.from_pretrained(__snake_case , subfolder='scheduler' )
a : str = CycleDiffusionPipeline.from_pretrained(__snake_case , scheduler=__snake_case , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
a : Tuple = 'A black colored car'
a : Tuple = 'A blue colored car'
a : List[str] = torch.manual_seed(0 )
a : str = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type='np' , )
a : Tuple = output.images
assert np.abs(image - expected_image ).max() < 2e-2 | 96 | 0 |
import base64
def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode('utf-8'))
def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
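Ascii85 output is not always easy to eyeball, so the quickest sanity check is a round trip:

assert base85_decode(base85_encode('The quick brown fox')) == 'The quick brown fox'
assert base85_encode('') == b''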
| 325 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = []
for part_id in partition_order:
lowercase__ : str = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(lowerCamelCase__ ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple = spark.range(100 ).repartition(1 )
lowercase__ : Tuple = Spark(lowerCamelCase__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple = spark.range(10 ).repartition(2 )
lowercase__ : Any = [1, 0]
lowercase__ : Optional[int] = _generate_iterable_examples(lowerCamelCase__ , lowerCamelCase__ ) # Reverse the partitions.
lowercase__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , lowerCamelCase__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__ , lowercase__ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int = spark.range(10 ).repartition(1 )
lowercase__ : Optional[int] = SparkExamplesIterable(lowerCamelCase__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Optional[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : int = lambda lowerCamelCase__ : x.reverse()
lowercase__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [2, 1, 0] )
lowercase__ : int = SparkExamplesIterable(lowerCamelCase__ ).shuffle_data_sources(lowerCamelCase__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Optional[Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[Any] = SparkExamplesIterable(lowerCamelCase__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : int = SparkExamplesIterable(lowerCamelCase__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int = spark.range(100 ).repartition(1 )
lowercase__ : Tuple = Spark(lowerCamelCase__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
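The shard counts asserted above follow from Arrow encoding each int64 id row in 8 bytes. A small helper, hypothetical and only for illustration, that reproduces the arithmetic the builder is expected to perform:

def expected_partitions(num_rows: int, row_bytes: int, max_shard_size: int) -> int:
    # ceil-divide the rows by how many fit in one shard, never below one row per shard
    rows_per_partition = max(1, max_shard_size // row_bytes)
    return -(-num_rows // rows_per_partition)

assert expected_partitions(100, 8, 16) == 50   # first test: 2 rows per partition
assert expected_partitions(100, 8, 1) == 100   # last test: capped at one row per partition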
| 130 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ) -> torch.Tensor:
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
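# Quick check of the mapping above: pixels scaled from [0, 255] land in [-1, 1],
# and (x + 1) / 2 is the inverse the decoding path applies later via
# ``image / 2 + 0.5``:
_px = np.array([0.0, 127.5, 255.0], dtype=np.float32) / 255.0
assert np.allclose(2.0 * _px - 1.0, [-1.0, 0.0, 1.0])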
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase)
@torch.no_grad()
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 100 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ):
'''simple docstring'''
if isinstance(_UpperCAmelCase , PIL.Image.Image):
__A : List[Any] = 1
elif isinstance(_UpperCAmelCase , torch.Tensor):
__A : List[str] = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_UpperCAmelCase)}')
if isinstance(_UpperCAmelCase , PIL.Image.Image):
__A : Optional[Any] = preprocess(_UpperCAmelCase)
__A ,__A : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__A : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width)
__A : Tuple = next(self.unet.parameters()).dtype
__A : int = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase)
__A : Tuple = image.to(device=self.device , dtype=_UpperCAmelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_UpperCAmelCase , device=self.device)
__A : List[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__A : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__A : Union[str, Any] = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__A : int = {}
if accepts_eta:
__A : Dict = eta
for t in self.progress_bar(_UpperCAmelCase):
# concat latents and low resolution image in the channel dimension.
__A : List[str] = torch.cat([latents, image] , dim=1)
__A : Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase)
# predict the noise residual
__A : Union[str, Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase).sample
# compute the previous noisy sample x_t -> x_t-1
__A : Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase).prev_sample
# decode the image latents with the VQVAE
__A : List[str] = self.vqvae.decode(_UpperCAmelCase).sample
__A : Dict = torch.clamp(_UpperCAmelCase , -1.0 , 1.0)
__A : List[str] = image / 2 + 0.5
__A : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__A : Union[str, Any] = self.numpy_to_pil(_UpperCAmelCase)
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=_UpperCAmelCase)
| 190 |
'''simple docstring'''
import math
def proth ( number : int ) -> int:
    if not isinstance(number , int ):
        __A : str = f'Input value of [number={number}] must be an integer'
        raise TypeError(__A )
    if number < 1:
        __A : str = f'Input value of [number={number}] must be > 0'
        raise ValueError(__A )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +2: binary blocks start at 2^0 and the sequence itself starts at [3, 5]
        block_index : int = int(math.log(number // 3 , 2 ) ) + 2
        proth_list : list = [3, 5]
        proth_index : int = 2
        increment : int = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
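# Worked example: the sequence runs 3, 5, 9, 13, 17, 25, 33, 49, 57, ... per the
# standard Proth-number listing, so the 6th entry is 25.
assert proth(6) == 25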
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""") | 190 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 16
lowercase_ = 32
def get_dataloaders( snake_case , snake_case = 16 , snake_case = "bert-base-cased" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(snake_case )
__SCREAMING_SNAKE_CASE : Any = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case ):
# max_length=None => use the model max length (it's actually the default)
__SCREAMING_SNAKE_CASE : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__SCREAMING_SNAKE_CASE : Union[str, Any] = datasets.map(
snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__SCREAMING_SNAKE_CASE : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
__SCREAMING_SNAKE_CASE : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
def evaluation_loop( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = 0
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[str] = model(**snake_case )
__SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case ) - 1:
__SCREAMING_SNAKE_CASE : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__SCREAMING_SNAKE_CASE : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case , references=snake_case , )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
return eval_metric["accuracy"]
def training_function( snake_case , snake_case ):
"""simple docstring"""
# Initialize accelerator
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__SCREAMING_SNAKE_CASE : str = config['''lr''']
__SCREAMING_SNAKE_CASE : Tuple = int(config['''num_epochs'''] )
__SCREAMING_SNAKE_CASE : str = int(config['''seed'''] )
__SCREAMING_SNAKE_CASE : Dict = int(config['''batch_size'''] )
__SCREAMING_SNAKE_CASE : List[Any] = args.model_name_or_path
set_seed(snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = get_dataloaders(snake_case , snake_case , snake_case )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
__SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained(snake_case , return_dict=snake_case )
# Instantiate optimizer
__SCREAMING_SNAKE_CASE : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__SCREAMING_SNAKE_CASE : List[Any] = optimizer_cls(params=model.parameters() , lr=snake_case )
if accelerator.state.deepspeed_plugin is not None:
__SCREAMING_SNAKE_CASE : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = 1
__SCREAMING_SNAKE_CASE : int = (len(snake_case ) * num_epochs) // gradient_accumulation_steps
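    # Worked example with hypothetical sizes: 115 batches per epoch for 3 epochs
    # at gradient_accumulation_steps=1 gives (115 * 3) // 1 = 345 scheduler steps.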
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__SCREAMING_SNAKE_CASE : List[str] = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=0 , num_training_steps=snake_case , )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = DummyScheduler(snake_case , total_num_steps=snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# We need to keep track of how many total steps we have iterated over
__SCREAMING_SNAKE_CASE : Tuple = 0
    # We also need to keep track of the starting epoch so files are named properly
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : Dict = evaluate.load('''glue''' , '''mrpc''' )
__SCREAMING_SNAKE_CASE : Dict = num_epochs
if args.partial_train_epoch is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__SCREAMING_SNAKE_CASE : str = args.resume_from_checkpoint.split('''epoch_''' )[1]
__SCREAMING_SNAKE_CASE : List[Any] = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__SCREAMING_SNAKE_CASE : Any = int(snake_case ) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = evaluation_loop(snake_case , snake_case , snake_case , snake_case )
accelerator.print('''resumed checkpoint performance:''' , snake_case )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
__SCREAMING_SNAKE_CASE : int = json.load(snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__SCREAMING_SNAKE_CASE : Any = {}
for epoch in range(snake_case , snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
__SCREAMING_SNAKE_CASE : str = model(**snake_case )
__SCREAMING_SNAKE_CASE : Any = outputs.loss
__SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''epoch_{epoch}'''
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(args.output_dir , snake_case )
accelerator.save_state(snake_case )
__SCREAMING_SNAKE_CASE : str = evaluation_loop(snake_case , snake_case , snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Tuple = accuracy
__SCREAMING_SNAKE_CASE : Tuple = lr_scheduler.get_lr()[0]
__SCREAMING_SNAKE_CASE : List[str] = optimizer.param_groups[0]['''lr''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = epoch
__SCREAMING_SNAKE_CASE : int = overall_step
accelerator.print(F'''epoch {epoch}:''' , snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(snake_case , snake_case )
def main( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case , )
parser.add_argument(
'''--output_dir''' , type=snake_case , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case , default=snake_case , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case , default=snake_case , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case , default=2 , help='''Number of train epochs.''' , )
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
__SCREAMING_SNAKE_CASE : Tuple = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 303 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , _A : TransformeraDModel , _A : AutoencoderKL , _A : KarrasDiffusionSchedulers , _A : Optional[Dict[int, str]] = None , ):
"""simple docstring"""
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A )
        # create an imagenet -> id dictionary for easier use
__SCREAMING_SNAKE_CASE : Optional[int] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = int(_A )
__SCREAMING_SNAKE_CASE : List[str] = dict(sorted(self.labels.items() ) )
def UpperCAmelCase__ ( self : List[Any] , _A : Union[str, List[str]] ):
"""simple docstring"""
if not isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(_A )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Dict , _A : List[int] , _A : float = 4.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = len(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.sample_size
__SCREAMING_SNAKE_CASE : List[Any] = self.transformer.config.in_channels
__SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A , device=self.device ).reshape(-1 )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([1000] * batch_size , device=self.device )
__SCREAMING_SNAKE_CASE : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input[: len(_A ) // 2]
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half, half] , dim=0 )
__SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(_A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = t
if not torch.is_tensor(_A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__SCREAMING_SNAKE_CASE : Any = latent_model_input.device.type == '''mps'''
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = torch.floataa if is_mps else torch.floataa
else:
__SCREAMING_SNAKE_CASE : int = torch.intaa if is_mps else torch.intaa
__SCREAMING_SNAKE_CASE : int = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE : Optional[int] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer(
_A , timestep=_A , class_labels=_A ).sample
# perform guidance
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = torch.split(_A , len(_A ) // 2 , dim=0 )
__SCREAMING_SNAKE_CASE : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__SCREAMING_SNAKE_CASE : List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = torch.split(_A , _A , dim=1 )
else:
__SCREAMING_SNAKE_CASE : List[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__SCREAMING_SNAKE_CASE : str = self.scheduler.step(_A , _A , _A ).prev_sample
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.chunk(2 , dim=0 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input
__SCREAMING_SNAKE_CASE : List[Any] = 1 / self.vae.config.scaling_factor * latents
__SCREAMING_SNAKE_CASE : List[str] = self.vae.decode(_A ).sample
__SCREAMING_SNAKE_CASE : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(_A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A )
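# Hedged usage sketch for the pipeline above (it mirrors diffusers' DiTPipeline;
# the checkpoint name is an assumption, not pinned by this file):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   ids = pipe.get_label_ids(["white shark"])  # label text -> ImageNet class id
#   image = pipe(ids, guidance_scale=4.0, num_inference_steps=25).images[0]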
| 303 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_UpperCAmelCase = """pytorch_model.bin"""
_UpperCAmelCase = """pytorch_model.bin.index.json"""
_UpperCAmelCase = """adapter_config.json"""
_UpperCAmelCase = """adapter_model.bin"""
_UpperCAmelCase = """adapter_model.safetensors"""
_UpperCAmelCase = """tf_model.h5"""
_UpperCAmelCase = """tf_model.h5.index.json"""
_UpperCAmelCase = """model.ckpt"""
_UpperCAmelCase = """flax_model.msgpack"""
_UpperCAmelCase = """flax_model.msgpack.index.json"""
_UpperCAmelCase = """model.safetensors"""
_UpperCAmelCase = """model.safetensors.index.json"""
_UpperCAmelCase = """config.json"""
_UpperCAmelCase = """preprocessor_config.json"""
_UpperCAmelCase = FEATURE_EXTRACTOR_NAME
_UpperCAmelCase = """generation_config.json"""
_UpperCAmelCase = """modelcard.json"""
_UpperCAmelCase = """▁"""
_UpperCAmelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_UpperCAmelCase = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_UpperCAmelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_UpperCAmelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def UpperCamelCase ( __lowercase : Union[str, Any] ):
'''simple docstring'''
if version.parse(__lowercase ) < version.parse(__lowercase ):
if "dev" in min_version:
A_ : str = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
A_ : Tuple = f'''This example requires a minimum version of {min_version},'''
error_message += f''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
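# Hedged usage sketch for the version checker above (its name is masked as
# ``UpperCamelCase`` in this file; the call shape is inferred from the error
# text, not pinned by the source):
#   check_min_version("4.21.0.dev0")  # raises ImportError on older installs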
| 364 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=__A ):
'''simple docstring'''
lowerCamelCase_ = ['''onnx''']
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(self , ['onnx'] )
@classmethod
def lowerCAmelCase_ ( cls , *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(cls , ['onnx'] )
@classmethod
def lowerCAmelCase_ ( cls , *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(cls , ['onnx'] )
| 192 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class snake_case__ :
a_ = BlenderbotSmallConfig
a_ = {}
a_ = "gelu"
def __init__( self : str , _A : List[str] , _A : List[Any]=13 , _A : Dict=7 , _A : List[Any]=True , _A : Optional[Any]=False , _A : Dict=99 , _A : List[str]=32 , _A : Tuple=2 , _A : str=4 , _A : Any=37 , _A : int=0.1 , _A : Tuple=0.1 , _A : List[str]=20 , _A : List[str]=2 , _A : int=1 , _A : Any=0 , ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : str = use_labels
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = eos_token_id
UpperCAmelCase_ : Any = pad_token_id
UpperCAmelCase_ : List[str] = bos_token_id
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ : Optional[Any] = prepare_blenderbot_small_inputs_dict(_A , _A , _A )
return config, inputs_dict
def A ( self : Any , _A : Union[str, Any] , _A : int ) -> Dict:
UpperCAmelCase_ : str = TFBlenderbotSmallModel(config=_A ).get_decoder()
UpperCAmelCase_ : Optional[Any] = inputs_dict['''input_ids''']
UpperCAmelCase_ : List[Any] = input_ids[:1, :]
UpperCAmelCase_ : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase_ : List[Any] = inputs_dict['''head_mask''']
UpperCAmelCase_ : str = 1
# first forward pass
UpperCAmelCase_ : int = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
UpperCAmelCase_ , UpperCAmelCase_ : str = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCAmelCase_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ : Dict = model(_A , attention_mask=_A )[0]
UpperCAmelCase_ : List[Any] = model(_A , attention_mask=_A , past_key_values=_A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_A , _A , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
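# Tiny check of the default-mask rule above ("1 where the token is not padding"),
# with an assumed pad_token_id of 0 (illustrative, not taken from the config):
import numpy as _np
assert (_np.array([[5, 6, 0]]) != 0).astype(int).tolist() == [[1, 1, 0]]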
@require_tf
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a_ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ = True
a_ = False
a_ = False
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_ : str = TFBlenderbotSmallModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=_A )
def A ( self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def A ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_tokenizers
@require_tf
class snake_case__ ( unittest.TestCase):
a_ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
a_ = "facebook/blenderbot_small-90M"
@cached_property
def A ( self : Any ) -> str:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def A ( self : Dict ) -> int:
UpperCAmelCase_ : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A ( self : Any ) -> str:
UpperCAmelCase_ : int = self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase_ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_A , )
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 304 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 304 | 1 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_a : List[str] = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_a : Optional[int] = parser.parse_args()
_a : List[str] = """cpu"""
_a : Tuple = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_a : int = """path-to-your-trained-model"""
_a : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_a : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_a : Union[str, Any] = pipe.to(device)
# to channels last
_a : List[str] = pipe.unet.to(memory_format=torch.channels_last)
_a : Any = pipe.vae.to(memory_format=torch.channels_last)
_a : Tuple = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_a : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_a : Dict = torch.randn(2, 4, 6_4, 6_4)
_a : Any = torch.rand(1) * 9_9_9
_a : str = torch.randn(2, 7_7, 7_6_8)
_a : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
_a : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_a : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_a : str = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_a : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_a : Dict = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_a : Any = 6_6_6
_a : int = torch.Generator(device).manual_seed(seed)
_a : int = {"""generator""": generator}
if args.steps is not None:
_a : List[Any] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_a : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 46 |
'''simple docstring'''
import sys
def matrix_chain_order ( array ) -> tuple:
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution ( optimal_solution , i , j ) -> None:
    if i == j:
        print("""A""" + str(i ) , end=""" """ )
    else:
        print("""(""" , end=""" """ )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(""")""" , end=""" """ )
def main ( ) -> None:
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
    main()
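# Reference result for the chain above (the classic CLRS instance): the minimum
# is 15125 scalar multiplications, achieved by the split ((A1(A2A3))((A4A5)A6)).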
| 46 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = AlbertConfig.from_json_file(_snake_case )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case : Tuple = AlbertForPreTraining(_snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_snake_case , _snake_case , _snake_case )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase : List[Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( __snake_case ):
__lowercase = ["pixel_values"]
def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BICUBIC , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_55 , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
_snake_case = size if size is not None else {"""height""": 2_24, """width""": 2_24}
_snake_case = get_size_dict(lowerCamelCase_ )
_snake_case = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_snake_case = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ , param_name='crop_size' )
_snake_case = do_resize
_snake_case = do_rescale
_snake_case = do_normalize
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = size
_snake_case = resample
_snake_case = rescale_factor
_snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = get_size_dict(lowerCamelCase_ )
if "shortest_edge" in size:
_snake_case = get_resize_output_image_size(lowerCamelCase_ , size=size['shortest_edge'] , default_to_square=lowerCamelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_snake_case = (size["""height"""], size["""width"""])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCamelCase_ , size=(size['height'], size['width']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ):
"""simple docstring"""
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowerCamelCase_ , param_name='crop_size' , default_to_square=lowerCamelCase_ )
_snake_case = resample if resample is not None else self.resample
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowerCamelCase_ )
if not is_batched(lowerCamelCase_ ):
_snake_case = [images]
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
_snake_case = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
_snake_case = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
| 365 |
'''simple docstring'''
def solution() -> int:
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
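# Reference result (the well-known Project Euler #9 answer): the only such
# triple is a=200, b=375, c=425, so the product is 31875000.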
if __name__ == "__main__":
print(F'''{solution() = }''')
| 160 | 0 |
import math
def res( x : int , y : int ) -> float:
    """simple docstring"""
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_lowercase: Any = "Enter the base and the power separated by a comma: "
_lowercase , _lowercase: Optional[int] = map(int, input(prompt).split(","))
_lowercase , _lowercase: int = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
_lowercase: Optional[int] = res(xa, ya)
_lowercase: Tuple = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 227 |
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id ( spanish_id )-> bool:
    '''simple docstring'''
    if not isinstance(spanish_id , str ):
        msg = f'''Expected string as input, found {type(spanish_id ).__name__}'''
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''' , '''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
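# Worked example (a minimal check of the DNI rule above): 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == 'Z', so '12345678Z' validates.
assert is_spain_national_id('''12345678Z''')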
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _SCREAMING_SNAKE_CASE ( __a ):
def __get__( self : Any , a__ : int , a__ : str=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
__magic_name__ ='''__cached_''' + self.fget.__name__
__magic_name__ =getattr(a__ , a__ , a__ )
if cached is None:
__magic_name__ =self.fget(a__ )
setattr(a__ , a__ , a__ )
return cached
def UpperCamelCase ( a ) -> Dict:
'''simple docstring'''
__magic_name__ =val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'''invalid truth value {val!r}''' )
def UpperCamelCase ( a ) -> Optional[int]:
'''simple docstring'''
if is_torch_fx_proxy(a ):
return True
if is_torch_available():
import torch
if isinstance(a , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(a , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(a , (jnp.ndarray, Tracer) ):
return True
return isinstance(a , np.ndarray )
def UpperCamelCase ( a ) -> List[str]:
'''simple docstring'''
return isinstance(a , np.ndarray )
def UpperCamelCase ( a ) -> Optional[int]:
'''simple docstring'''
return _is_numpy(a )
def UpperCamelCase ( a ) -> Optional[int]:
'''simple docstring'''
import torch
return isinstance(a , torch.Tensor )
def UpperCamelCase ( a ) -> List[Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(a )
def UpperCamelCase ( a ) -> List[str]:
'''simple docstring'''
import torch
return isinstance(a , torch.device )
def UpperCamelCase ( a ) -> Union[str, Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(a )
def UpperCamelCase ( a ) -> int:
'''simple docstring'''
import torch
if isinstance(a , a ):
if hasattr(a , a ):
__magic_name__ =getattr(a , a )
else:
return False
return isinstance(a , torch.dtype )
def UpperCamelCase ( a ) -> Dict:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(a )
def UpperCamelCase ( a ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
return isinstance(a , tf.Tensor )
def UpperCamelCase ( a ) -> Any:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(a )
def UpperCamelCase ( a ) -> List[str]:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(a , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(a )
return type(a ) == tf.Tensor
def UpperCamelCase ( a ) -> int:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(a )
def UpperCamelCase ( a ) -> Tuple:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(a , jnp.ndarray )
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(a )
def UpperCamelCase ( a ) -> Any:
'''simple docstring'''
if isinstance(a , (dict, UserDict) ):
return {k: to_py_obj(a ) for k, v in obj.items()}
elif isinstance(a , (list, tuple) ):
return [to_py_obj(a ) for o in obj]
elif is_tf_tensor(a ):
return obj.numpy().tolist()
elif is_torch_tensor(a ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(a ):
return np.asarray(a ).tolist()
elif isinstance(a , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
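# Worked example for the converter above (its name is masked in this file):
# {"x": np.arange(2), "y": (1, 2)} comes back as plain {"x": [0, 1], "y": [1, 2]}.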
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
if isinstance(a , (dict, UserDict) ):
return {k: to_numpy(a ) for k, v in obj.items()}
elif isinstance(a , (list, tuple) ):
return np.array(a )
elif is_tf_tensor(a ):
return obj.numpy()
elif is_torch_tensor(a ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(a ):
return np.asarray(a )
else:
return obj
class _SCREAMING_SNAKE_CASE ( __a ):
def snake_case__ ( self : Dict ):
__magic_name__ =fields(self )
# Safety and consistency checks
if not len(a__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
__magic_name__ =getattr(self , class_fields[0].name )
__magic_name__ =all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(a__ ):
if isinstance(a__ , a__ ):
__magic_name__ =first_field.items()
__magic_name__ =True
else:
try:
__magic_name__ =iter(a__ )
__magic_name__ =True
except TypeError:
__magic_name__ =False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(a__ ):
if (
not isinstance(a__ , (list, tuple) )
or not len(a__ ) == 2
or not isinstance(element[0] , a__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__magic_name__ =first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__magic_name__ =element[1]
elif first_field is not None:
__magic_name__ =first_field
else:
for field in class_fields:
__magic_name__ =getattr(self , field.name )
if v is not None:
__magic_name__ =v
def __delitem__( self : List[Any] , *a__ : Union[str, Any] , **a__ : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def snake_case__ ( self : List[str] , *a__ : List[Any] , **a__ : Optional[int] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def snake_case__ ( self : str , *a__ : List[str] , **a__ : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def snake_case__ ( self : List[Any] , *a__ : Optional[int] , **a__ : List[str] ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : str , a__ : Tuple ):
if isinstance(a__ , a__ ):
__magic_name__ =dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] , a__ : Tuple , a__ : str ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(a__ , a__ )
super().__setattr__(a__ , a__ )
def __setitem__( self : Optional[Any] , a__ : Any , a__ : List[str] ):
# Will raise a KeyException if needed
super().__setitem__(a__ , a__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(a__ , a__ )
def snake_case__ ( self : Optional[int] ):
return tuple(self[k] for k in self.keys() )
class _SCREAMING_SNAKE_CASE ( __a ,__a ):
@classmethod
def snake_case__ ( cls : int , a__ : Dict ):
raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = """longest"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = """max_length"""
__SCREAMING_SNAKE_CASE :Tuple = """do_not_pad"""
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = """pt"""
__SCREAMING_SNAKE_CASE :Any = """tf"""
__SCREAMING_SNAKE_CASE :str = """np"""
__SCREAMING_SNAKE_CASE :List[Any] = """jax"""
class _SCREAMING_SNAKE_CASE :
def __init__( self : str , a__ : List[ContextManager] ):
__magic_name__ =context_managers
__magic_name__ =ExitStack()
def __enter__( self : Union[str, Any] ):
for context_manager in self.context_managers:
self.stack.enter_context(a__ )
def __exit__( self : Optional[int] , *a__ : List[str] , **a__ : Dict ):
self.stack.__exit__(*a__ , **a__ )
def UpperCamelCase ( a ) -> Optional[int]:
'''simple docstring'''
__magic_name__ =infer_framework(a )
if framework == "tf":
__magic_name__ =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ =inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ =inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def UpperCamelCase ( a ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ =model_class.__name__
__magic_name__ =infer_framework(a )
if framework == "tf":
__magic_name__ =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ =inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ =inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def UpperCamelCase ( a , a = "" , a = "." ) -> Optional[int]:
'''simple docstring'''
def _flatten_dict(a , a="" , a="." ):
for k, v in d.items():
__magic_name__ =str(a ) + delimiter + str(a ) if parent_key else k
if v and isinstance(a , a ):
yield from flatten_dict(a , a , delimiter=a ).items()
else:
yield key, v
return dict(_flatten_dict(a , a , a ) )
@contextmanager
def UpperCamelCase ( a , a = False ) -> Any:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def UpperCamelCase ( a , a=None ) -> List[Any]:
'''simple docstring'''
if is_numpy_array(a ):
return np.transpose(a , axes=a )
elif is_torch_tensor(a ):
return array.T if axes is None else array.permute(*a )
elif is_tf_tensor(a ):
import tensorflow as tf
return tf.transpose(a , perm=a )
elif is_jax_tensor(a ):
return jnp.transpose(a , axes=a )
else:
raise ValueError(F'''Type not supported for transpose: {type(a )}.''' )
def UpperCamelCase ( a , a ) -> List[Any]:
'''simple docstring'''
if is_numpy_array(a ):
return np.reshape(a , a )
elif is_torch_tensor(a ):
return array.reshape(*a )
elif is_tf_tensor(a ):
import tensorflow as tf
return tf.reshape(a , a )
elif is_jax_tensor(a ):
return jnp.reshape(a , a )
else:
raise ValueError(F'''Type not supported for reshape: {type(a )}.''' )
def UpperCamelCase ( a , a=None ) -> int:
'''simple docstring'''
if is_numpy_array(a ):
return np.squeeze(a , axis=a )
elif is_torch_tensor(a ):
return array.squeeze() if axis is None else array.squeeze(dim=a )
elif is_tf_tensor(a ):
import tensorflow as tf
return tf.squeeze(a , axis=a )
elif is_jax_tensor(a ):
return jnp.squeeze(a , axis=a )
else:
raise ValueError(F'''Type not supported for squeeze: {type(a )}.''' )
def UpperCamelCase ( a , a ) -> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(a ):
return np.expand_dims(a , a )
elif is_torch_tensor(a ):
return array.unsqueeze(dim=a )
elif is_tf_tensor(a ):
import tensorflow as tf
return tf.expand_dims(a , axis=a )
elif is_jax_tensor(a ):
return jnp.expand_dims(a , axis=a )
else:
raise ValueError(F'''Type not supported for expand_dims: {type(a )}.''' )
def UpperCamelCase ( a ) -> List[str]:
'''simple docstring'''
if is_numpy_array(a ):
return np.size(a )
elif is_torch_tensor(a ):
return array.numel()
elif is_tf_tensor(a ):
import tensorflow as tf
return tf.size(a )
elif is_jax_tensor(a ):
return array.size
else:
raise ValueError(F'''Type not supported for expand_dims: {type(a )}.''' )
def UpperCamelCase ( a , a ) -> List[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(a , (tuple, list) ):
__magic_name__ =[F'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
__magic_name__ =F'''{repo_id}--{value}'''
return auto_map
def infer_framework ( a ) -> Optional[int]:
'''simple docstring'''
for base_class in inspect.getmro(a ):
__magic_name__ =base_class.__module__
__magic_name__ =base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'''Could not infer framework from class {model_class}.''' )
| 351 |
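# The utilities above recursively unwrap framework tensors into plain Python
# containers and flatten nested dicts. A minimal, self-contained sketch of the
# flatten-dict pattern (names here are illustrative, not the library's own API):
def flatten_dict_sketch(d, parent_key="", delimiter="."):
    # Walk the dict depth-first, joining nested keys with the delimiter.
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if isinstance(v, dict) and v:
            items.update(flatten_dict_sketch(v, key, delimiter))
        else:
            items[key] = v
    return items

assert flatten_dict_sketch({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {"a.b": 1, "a.c.d": 2, "e": 3}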
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCamelCase ( a="ro" , a="en" , a="wmt16" , a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__magic_name__ = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
__magic_name__ = datasets.load_dataset(a , a )
if save_dir is None:
__magic_name__ = F'''{dataset}-{pair}'''
__magic_name__ = Path(a )
save_dir.mkdir(exist_ok=a )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
__magic_name__ = '''val''' if split == '''validation''' else split
__magic_name__ = save_dir.joinpath(F'''{fn}.source''' )
__magic_name__ = save_dir.joinpath(F'''{fn}.target''' )
__magic_name__ = src_path.open('''w+''' )
__magic_name__ = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 98 | 0 |
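# The converter above streams one translation record at a time into parallel
# ``.source``/``.target`` files. A dependency-free sketch of that write loop,
# with an in-memory stand-in for the datasets split (all names hypothetical):
from pathlib import Path

def write_parallel_files(records, save_dir, src_lang="ro", tgt_lang="en"):
    # records: iterable of {"translation": {src_lang: ..., tgt_lang: ...}} dicts
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "train.source", "w") as src_fp, open(save_dir / "train.target", "w") as tgt_fp:
        for x in records:
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

# write_parallel_files([{"translation": {"ro": "salut", "en": "hello"}}], "wmt16-ro-en")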
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def A ( snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
SCREAMING_SNAKE_CASE__ = quote(snake_case__ )
return hfh.hf_hub_url(snake_case__ , snake_case__ , repo_type="""dataset""" , revision=snake_case__ )
| 165 |
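# The helper above only URL-encodes the file path when the installed
# huggingface_hub predates 0.11.0, where hf_hub_url did not encode it itself.
# A minimal sketch of that version gate (the cutoff is taken from the code above):
from urllib.parse import quote
from packaging import version

def maybe_quote(path, hfh_version):
    # Quote the path ourselves only when the installed client is too old.
    if version.parse(hfh_version).release < version.parse("0.11.0").release:
        return quote(path)
    return path

assert maybe_quote("dir/file name.txt", "0.10.1") == "dir/file%20name.txt"
assert maybe_quote("dir/file name.txt", "0.11.0") == "dir/file name.txt"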
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | 1 |
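# The init above registers import targets in _import_structure and defers the
# heavy imports until first attribute access. A minimal sketch of the same idea
# using module-level __getattr__ (PEP 562) instead of transformers' _LazyModule;
# the attribute-to-module mapping here is illustrative:
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module

def __getattr__(name):
    # Only runs when normal lookup fails, so imports happen on first use.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")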
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( lowercase : Accelerator , lowercase : int = 16 ) -> Optional[Any]:
_a = AutoTokenizer.from_pretrained("bert-base-cased" )
_a = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_a = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a = 16
elif accelerator.mixed_precision != "no":
_a = 8
else:
_a = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
_a = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
_a = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function ( lowercase : Optional[Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
_a = 2
# Initialize accelerator
_a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a = config["lr"]
_a = int(config["num_epochs"] )
_a = int(config["seed"] )
_a = int(config["batch_size"] )
_a = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
_a = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a = batch_size // MAX_GPU_BATCH_SIZE
_a = MAX_GPU_BATCH_SIZE
set_seed(lowercase )
_a , _a = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a = model.to(accelerator.device )
# Instantiate optimizer
_a = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
_a = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a = model(**lowercase )
_a = outputs.loss
_a = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_a = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**lowercase )
_a = outputs.logits.argmax(dim=-1 )
_a , _a = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase , references=lowercase , )
_a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowercase )
def main ( ) -> List[str]:
_a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_a = parser.parse_args()
_a = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 363 |
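# In the eval loop above, the last gathered batch on a distributed setup can
# contain padding samples duplicated across processes, so it is truncated to
# the true dataset size. A plain-Python sketch of that bookkeeping, without
# accelerate (function name is made up):
def truncate_gathered(batches, dataset_len):
    # batches: per-step gathered predictions; drop pad duplicates at the end.
    samples_seen, out = 0, []
    for step, batch in enumerate(batches):
        if step == len(batches) - 1:
            batch = batch[: dataset_len - samples_seen]  # last batch may be padded
        else:
            samples_seen += len(batch)
        out.extend(batch)
    return out

# 10 real samples gathered as 3 batches of 4: the last 2 entries are padding.
assert len(truncate_gathered([[0] * 4, [0] * 4, [0] * 4], dataset_len=10)) == 10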
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __SCREAMING_SNAKE_CASE (Pipeline ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 | 0 |
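# The pipeline above splits inference into preprocess -> _forward ->
# postprocess so batching and device placement can be handled generically in
# between. A toy sketch of that three-stage contract (model and stages are
# stand-ins, not the real pipeline API):
class ToyPipeline:
    def preprocess(self, text):
        return {"tokens": text.lower().split()}

    def _forward(self, model_inputs):
        return {"score": len(model_inputs["tokens"])}  # stand-in "model"

    def postprocess(self, model_outputs, top_k=1):
        return [{"score": model_outputs["score"], "answer": "yes"}][:top_k]

    def __call__(self, text, **kwargs):
        return self.postprocess(self._forward(self.preprocess(text)), **kwargs)

assert ToyPipeline()("Is this a pipeline?")[0]["score"] == 4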
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52 |
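# The lazy inits in this file gate their model imports on torch being
# installed and fall back to the configuration-only surface otherwise. A
# minimal sketch of that availability check with importlib (the exception
# class is illustrative, not the transformers one):
import importlib.util

class OptionalDependencyNotAvailableSketch(Exception):
    pass

def require(package):
    if importlib.util.find_spec(package) is None:
        raise OptionalDependencyNotAvailableSketch(f"{package} is not installed")

try:
    require("torch")
except OptionalDependencyNotAvailableSketch:
    HAS_TORCH = False
else:
    HAS_TORCH = True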
def is_pangram ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowercase__ : str = set()
# Replace all the whitespace in our sentence
lowercase__ : Tuple = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 26
def is_pangram_faster ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowercase__ : Dict = [False] * 26
for char in input_str:
if char.islower():
lowercase__ : List[Any] = True
elif char.isupper():
lowercase__ : Optional[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def is_pangram_fastest ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark ( ):
'''simple docstring'''
from timeit import timeit
lowercase__ : Union[str, Any] = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('is_pangram_faster()' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('is_pangram_fastest()' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 214 | 0 |
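# The three checks above trade clarity for speed, and the set-comprehension
# version wins the benchmark. A small companion sketch that also reports which
# letters a non-pangram is missing (helper name is made up):
import string

def missing_letters(sentence):
    return sorted(set(string.ascii_lowercase) - set(sentence.lower()))

assert missing_letters("The quick brown fox jumps over the lazy dog") == []
assert missing_letters("hello world") == list("abcfgijkmnpqstuvxyz")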
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Tuple = state_dict.pop(__UpperCAmelCase )
__lowerCamelCase : Any = val
def rename_backbone_keys ( lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowerCamelCase : Optional[int] = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
__lowerCamelCase : List[Any] = value
else:
__lowerCamelCase : List[str] = value
return new_state_dict
def read_in_q_k_v ( lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
__lowerCamelCase : Any = ''''''
if is_panoptic:
__lowerCamelCase : Union[str, Any] = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCamelCase : Dict = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__lowerCamelCase : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase : List[Any] = in_proj_weight[:2_5_6, :]
__lowerCamelCase : List[str] = in_proj_bias[:2_5_6]
__lowerCamelCase : Dict = in_proj_weight[2_5_6:5_1_2, :]
__lowerCamelCase : Union[str, Any] = in_proj_bias[2_5_6:5_1_2]
__lowerCamelCase : Dict = in_proj_weight[-2_5_6:, :]
__lowerCamelCase : List[Any] = in_proj_bias[-2_5_6:]
def prepare_img ( ) -> List[str]:
__lowerCamelCase : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase : Optional[int] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Tuple = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowerCamelCase : str = '''resnet101'''
if "dc5" in model_name:
__lowerCamelCase : Dict = True
__lowerCamelCase : Dict = '''panoptic''' in model_name
if is_panoptic:
__lowerCamelCase : Optional[int] = 2_5_0
else:
__lowerCamelCase : int = 9_1
__lowerCamelCase : Union[str, Any] = '''huggingface/label-files'''
__lowerCamelCase : int = '''coco-detection-id2label.json'''
__lowerCamelCase : str = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
    __lowerCamelCase : Any = {int(__UpperCAmelCase ): v for k, v in id2label.items()}
    __lowerCamelCase : List[str] = id2label
    __lowerCamelCase : Optional[int] = {v: k for k, v in id2label.items()}
# load image processor
__lowerCamelCase : str = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowerCamelCase : Any = ConditionalDetrImageProcessor(format=__UpperCAmelCase )
# prepare image
__lowerCamelCase : List[Any] = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='pt' )
__lowerCamelCase : Tuple = encoding['''pixel_values''']
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
__lowerCamelCase : Dict = torch.hub.load('DeppMeng/ConditionalDETR' , __UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
__lowerCamelCase : Optional[int] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowerCamelCase : int = '''conditional_detr.''' + src
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase : Optional[Any] = rename_backbone_keys(__UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__UpperCAmelCase , is_panoptic=__UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCamelCase : Dict = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
__lowerCamelCase : str = state_dict.pop(__UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCamelCase : str = state_dict.pop(__UpperCAmelCase )
__lowerCamelCase : Optional[int] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
__lowerCamelCase : Any = state_dict.pop(__UpperCAmelCase )
__lowerCamelCase : List[Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__lowerCamelCase : str = state_dict.pop(__UpperCAmelCase )
__lowerCamelCase : Dict = val
# finally, create HuggingFace model and load state dict
__lowerCamelCase : List[str] = ConditionalDetrForSegmentation(__UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
model.push_to_hub(repo_id=__UpperCAmelCase , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
__lowerCamelCase : str = conditional_detr(__UpperCAmelCase )
__lowerCamelCase : Tuple = model(__UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 352 |
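# The conversion above is mostly state-dict surgery: rename keys into the new
# layout, then split each fused in_proj matrix into separate q/k/v blocks. A
# numpy sketch of both steps on toy shapes (key names are illustrative):
import numpy as np

state_dict = {
    "transformer.encoder.layers.0.linear1.weight": np.zeros((4, 2)),
    "self_attn.in_proj_weight": np.arange(12).reshape(6, 2),  # 3*d x d with d=2
}
rename_map = {"transformer.encoder.layers.0.linear1.weight": "encoder.layers.0.fc1.weight"}
for src, dest in rename_map.items():
    state_dict[dest] = state_dict.pop(src)  # same move as rename_key above

w = state_dict.pop("self_attn.in_proj_weight")
d = w.shape[0] // 3
state_dict["q_proj.weight"] = w[:d, :]
state_dict["k_proj.weight"] = w[d : 2 * d, :]
state_dict["v_proj.weight"] = w[-d:, :]
assert state_dict["k_proj.weight"].shape == (2, 2)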
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 113 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_A = """http://www.mocksite.com/file1.txt"""
_A = """\"text\": [\"foo\", \"foo\"]"""
_A = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse :
_lowerCamelCase :Any = 200
_lowerCamelCase :List[Any] = {"Content-Length": "100"}
_lowerCamelCase :Dict = {}
def _lowerCAmelCase ( self : Optional[int] , **UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
        return [bytes(CONTENT , """utf-8""" )]
def lowercase_ ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
import requests
monkeypatch.setattr(__UpperCAmelCase , """request""" , __UpperCAmelCase )
lowerCAmelCase__ : int = URL
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = url
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[str] = [url]
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = {"""train""": url}
lowerCAmelCase__ : List[Any] = """dummy"""
lowerCAmelCase__ : Optional[int] = """downloads"""
lowerCAmelCase__ : Optional[Any] = tmp_path
lowerCAmelCase__ : int = DownloadConfig(
cache_dir=os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , use_etag=__UpperCAmelCase , )
lowerCAmelCase__ : Any = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = dl_manager.download(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[str] = [downloaded_paths]
lowerCAmelCase__ : Any = [urls]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase__ : Union[str, Any] = downloaded_paths.values()
lowerCAmelCase__ : int = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase__ : int = Path(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase__ : Tuple = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase__ : Optional[Any] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowerCAmelCase__ : List[Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowerCAmelCase__ : Any = str(__UpperCAmelCase )
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : int = filename
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = [filename]
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = {"""train""": filename}
lowerCAmelCase__ : Any = """dummy"""
lowerCAmelCase__ : Tuple = xz_file.parent
lowerCAmelCase__ : Dict = """extracted"""
lowerCAmelCase__ : List[Any] = DownloadConfig(
cache_dir=__UpperCAmelCase , use_etag=__UpperCAmelCase , )
lowerCAmelCase__ : int = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = dl_manager.extract(__UpperCAmelCase )
lowerCAmelCase__ : int = paths
for extracted_paths in [extracted_paths]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = [extracted_paths]
lowerCAmelCase__ : Dict = [paths]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assert "train" in extracted_paths.keys()
lowerCAmelCase__ : List[str] = extracted_paths.values()
lowerCAmelCase__ : Optional[int] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase__ : Tuple = Path(__UpperCAmelCase )
lowerCAmelCase__ : int = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__UpperCAmelCase , etag=__UpperCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase__ : List[Any] = extracted_path.read_text()
lowerCAmelCase__ : Optional[int] = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(__UpperCAmelCase , start=1 ):
lowerCAmelCase__ : int = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : Dict = request.getfixturevalue(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ):
_test_jsonl(__UpperCAmelCase , __UpperCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Dict = request.getfixturevalue(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ):
_test_jsonl(__UpperCAmelCase , __UpperCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowercase_ ( __UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__UpperCAmelCase ) , start=1 ):
assert os.path.basename(__UpperCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 242 |
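# The tests above parametrize one body over str/list/dict inputs and replace
# network access with monkeypatch. A self-contained sketch of that pattern
# (the fetch function and its module layout are hypothetical):
import pytest

def fetch(url):
    raise RuntimeError("network disabled in tests")  # stand-in for real I/O

def normalize(urls):
    # Mirror the str/list/dict handling used by the download tests above.
    if isinstance(urls, str):
        return [urls]
    if isinstance(urls, dict):
        return list(urls.values())
    return list(urls)

@pytest.mark.parametrize(
    "urls", ["http://x/a", ["http://x/a", "http://x/b"], {"train": "http://x/a"}]
)
def test_download_shapes(monkeypatch, urls):
    monkeypatch.setattr(f"{__name__}.fetch", lambda url: f"cached:{url}")
    results = [fetch(u) for u in normalize(urls)]
    assert all(r.startswith("cached:") for r in results)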
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_A = logging.get_logger(__name__)
class _lowerCamelCase :
def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = question_encoder
lowerCAmelCase__ : Optional[int] = generator
lowerCAmelCase__ : Optional[int] = self.question_encoder
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Optional[Any] ) -> str:
"""simple docstring"""
if os.path.isfile(UpperCamelCase ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , """question_encoder_tokenizer""" )
lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , """generator_tokenizer""" )
self.question_encoder.save_pretrained(UpperCamelCase )
self.generator.save_pretrained(UpperCamelCase )
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] , UpperCamelCase : List[str] , **UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCAmelCase__ : Dict = kwargs.pop("""config""" , UpperCamelCase )
if config is None:
lowerCAmelCase__ : int = RagConfig.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
UpperCamelCase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
UpperCamelCase , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=UpperCamelCase , generator=UpperCamelCase )
def __call__( self : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
return self.current_tokenizer(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Dict , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return self.generator.batch_decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ) -> str:
"""simple docstring"""
return self.generator.decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.question_encoder
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.generator
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "longest" , UpperCamelCase : str = None , UpperCamelCase : bool = True , **UpperCamelCase : Union[str, Any] , ) -> BatchEncoding:
"""simple docstring"""
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , UpperCamelCase , )
if max_length is None:
lowerCAmelCase__ : Any = self.current_tokenizer.model_max_length
lowerCAmelCase__ : Tuple = self(
UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , max_length=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , **UpperCamelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCAmelCase__ : Tuple = self.current_tokenizer.model_max_length
lowerCAmelCase__ : Tuple = self(
text_target=UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase , **UpperCamelCase , )
lowerCAmelCase__ : Any = labels["""input_ids"""]
return model_inputs
| 242 | 1 |
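# The RAG tokenizer above wraps two sub-tokenizers and forwards __call__ to
# whichever one is "current", switching between question encoder and
# generator. A toy sketch of that delegation pattern (names are invented):
class DualProcessor:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current = question_encoder

    def as_target(self):  # analogous to switching to the generator tokenizer
        self.current = self.generator

    def as_source(self):
        self.current = self.question_encoder

    def __call__(self, text):
        return self.current(text)

dp = DualProcessor(str.upper, str.lower)
assert dp("Hi") == "HI"
dp.as_target()
assert dp("Hi") == "hi"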
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "" ) -> dict[str, float]:
lowercase__: List[str] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
lowercase__: int = BeautifulSoup(requests.get(__UpperCAmelCase ).text , '''html.parser''' )
lowercase__: List[str] = soup.find_all('''td''' , attrs='''titleColumn''' )
lowercase__: Optional[int] = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__UpperCAmelCase , __UpperCAmelCase )
}
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "IMDb_Top_250_Movies.csv" ) -> None:
    lowercase__: Optional[int] = get_imdb_top_250_movies()
with open(__UpperCAmelCase , '''w''' , newline='''''' ) as out_file:
lowercase__: str = csv.writer(__UpperCAmelCase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 359 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__A )
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "rag"
_UpperCAmelCase :List[Any] = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' )
lowercase__: Any = question_encoder_config.pop('''model_type''' )
lowercase__: Tuple = kwargs.pop('''generator''' )
lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: str = reduce_loss
lowercase__: str = label_smoothing
lowercase__: Dict = exclude_bos_score
lowercase__: Any = do_marginalize
lowercase__: Optional[int] = title_sep
lowercase__: Any = doc_sep
lowercase__: Any = n_docs
lowercase__: List[Any] = max_combined_length
lowercase__: int = dataset
lowercase__: int = dataset_split
lowercase__: str = index_name
lowercase__: Dict = retrieval_vector_size
lowercase__: Dict = retrieval_batch_size
lowercase__: List[str] = passages_path
lowercase__: str = index_path
lowercase__: Optional[Any] = use_dummy_dataset
lowercase__: str = output_retrieved
lowercase__: List[str] = do_deduplication
lowercase__: List[Any] = use_cache
if self.forced_eos_token_id is None:
lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase )
@classmethod
def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = copy.deepcopy(self.__dict__ )
lowercase__: str = self.question_encoder.to_dict()
lowercase__: str = self.generator.to_dict()
lowercase__: str = self.__class__.model_type
return output
| 2 | 0 |
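# RagConfig above is a composite: it nests two sub-configs and round-trips
# them through plain dicts in to_dict(). A dataclass sketch of that
# composite-config pattern (field names are illustrative):
from dataclasses import asdict, dataclass, field

@dataclass
class SubConfig:
    model_type: str = "toy"
    hidden_size: int = 8

@dataclass
class CompositeConfig:
    question_encoder: SubConfig = field(default_factory=SubConfig)
    generator: SubConfig = field(default_factory=SubConfig)
    n_docs: int = 5

    def to_dict(self):
        return asdict(self)  # recursively serializes the nested sub-configs

assert CompositeConfig().to_dict()["generator"]["hidden_size"] == 8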
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
"""simple docstring"""
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 171 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
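# e.g. is_1percent_close(2589981, 2600000) is True (roughly 0.4% apart), while
# is_1percent_close(100, 90) is False; used below to compare on-disk byte counts.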
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 171 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
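# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical 8-bit loading flow; the model class and checkpoint
# path below are assumptions for illustration only.
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel(config)  # hypothetical model built on the meta device
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )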
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n  Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n  the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n  these modules in `torch_dtype`, you need to pass a custom `device_map` to\n  `load_and_quantize_model`. Check\n  https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n  for more details.\n  "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=dtype, value=torch.empty(*param.size()))
| 357 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
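# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical post-processing step for the depth map checked above: resize the
# raw prediction back to the input image size for visualization.
#
#   prediction = torch.nn.functional.interpolate(
#       predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
#   ).squeeze(1)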
| 60 | 0 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
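# --- Illustrative usage sketch (not part of the original file) ---
# fire exposes calculate_rouge_path's parameters as CLI arguments, so a
# hypothetical invocation (the file names are assumptions) would look like:
#
#   python rouge_cli.py predicted_summaries.txt reference_summaries.txt --save_path rouge.json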
| 75 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 96 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
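# e.g. `foo: List[int] = list_field(default=[1, 2, 3])` gives each dataclass
# instance its own fresh copy of the default list (dataclasses cannot share a
# mutable default directly).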
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
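# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical example of the API exercised by the tests above:
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "True"],
#       look_for_args_file=False,
#   )
#   assert example.foo == 1 and example.flag is True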
| 36 |
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append([line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append([line.split()[0], line.split()[2]])
    return dict_of_neighbours
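# Expected input: one whitespace-separated edge per line. For example, a file
#   a b 20
#   a c 18
#   b c 10
# yields {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"], ["c", "10"]],
#         "c": [["a", "18"], ["b", "10"]]}.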
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ):
    # Each neighbour swaps two interior nodes of the tour; the candidate's total
    # distance is appended as its last element so the list can be sorted by cost.
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            # accept the move only if the swapped pair is not on the tabu list
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None ):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 36 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
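# Only the model classes whose backend (PyTorch or Flax) is installed are exposed.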
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 190 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase ):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path , exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname , 'realm_block_records')
        os.makedirs(realm_block_records_path , exist_ok=True)
    def get_tokenizer(self):
        '''simple docstring'''
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        '''simple docstring'''
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        '''simple docstring'''
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
    def get_dummy_block_records(self):
        '''simple docstring'''
        block_records = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
            ] , dtype=object , )
return block_records
    def get_dummy_retriever(self):
        '''simple docstring'''
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def test_retrieve(self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np')
        self.assertEqual(len(has_answers) , 2)
        self.assertEqual(len(start_pos) , 2)
        self.assertEqual(len(end_pos) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
    def test_block_has_answer(self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np')
        self.assertEqual([False, True, True] , has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos)
    def test_save_load_pretrained(self):
        '''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
        self.assertEqual(retriever.block_records[0] , B'This is the first record')
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
        self.assertEqual(retriever.block_records[0] , B'This is the first record')
| 190 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
class VersatileDiffusionImageVariationPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
    def test_inference_image_variations( self ) -> Dict:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 218 |
import doctest
from collections import deque
import numpy as np
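# Circular convolution of two finite signals via the circulant-matrix formulation:
# e.g. [2, 1, 2, -1] convolved circularly with [1, 2, 3, 4] gives [10, 10, 6, 14].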
class CircularConvolution:
    """simple docstring"""
    def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the circulant matrix is the second signal rotated by i positions
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 218 | 1 |
from dataclasses import dataclass, field
from typing import Optional
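# Arguments for training a CodeParrot-style causal language model.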
@dataclass
class TrainingArguments:
__lowerCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__lowerCAmelCase = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__lowerCAmelCase = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__lowerCAmelCase = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__lowerCAmelCase = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__lowerCAmelCase = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__lowerCAmelCase = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__lowerCAmelCase = field(
default=10_000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __lowerCAmelCase = field(default=2e-4 , metadata={"""help""": """Learning rate for training."""} )
__lowerCAmelCase = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__lowerCAmelCase = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__lowerCAmelCase = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__lowerCAmelCase = field(default=50_000 , metadata={"""help""": """Maximum number of training steps."""} )
__lowerCAmelCase = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__lowerCAmelCase = field(default=1_024 , metadata={"""help""": """Sequence lengths used for training."""} )
__lowerCAmelCase = field(default=1 , metadata={"""help""": """Training seed."""} )
__lowerCAmelCase = field(
default=1_024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """If True the data is pretokenized."""} )
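# Arguments for evaluating a trained model on a validation dataset.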
@dataclass
class EvaluationArguments:
__lowerCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__lowerCAmelCase = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__lowerCAmelCase = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__lowerCAmelCase = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__lowerCAmelCase = field(default=1_024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__lowerCAmelCase = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
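# Arguments for running and scoring the HumanEval code-generation benchmark.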
@dataclass
class HumanEvalArguments:
__lowerCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Sample from the language model\'s output distribution."""} )
__lowerCAmelCase = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__lowerCAmelCase = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__lowerCAmelCase = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__lowerCAmelCase = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__lowerCAmelCase = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__lowerCAmelCase = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__lowerCAmelCase = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__lowerCAmelCase = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__lowerCAmelCase = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__lowerCAmelCase = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
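# Arguments for filtering and deduplicating the raw code dataset.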
@dataclass
class PreprocessingArguments:
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__lowerCAmelCase = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__lowerCAmelCase = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__lowerCAmelCase = field(
default=100_000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__lowerCAmelCase = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__lowerCAmelCase = field(
default=1_000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__lowerCAmelCase = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__lowerCAmelCase = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__lowerCAmelCase = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__lowerCAmelCase = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__lowerCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__lowerCAmelCase = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
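# Arguments for training a new tokenizer on the code corpus.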
@dataclass
class TokenizerTrainingArguments:
__lowerCAmelCase = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__lowerCAmelCase = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__lowerCAmelCase = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__lowerCAmelCase = field(default=200_000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__lowerCAmelCase = field(
        default=32_768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__lowerCAmelCase = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
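# Arguments for pretokenizing a dataset and pushing the result to the Hub.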
@dataclass
class PretokenizationArguments:
__lowerCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__lowerCAmelCase = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__lowerCAmelCase = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
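# Arguments for initializing a fresh model configuration from a base architecture.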
@dataclass
class InitializationArguments:
__lowerCAmelCase = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__lowerCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__lowerCAmelCase = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
| 343 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
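# Build the import structure lazily so that the torch-only model classes are
# registered only when PyTorch is actually available.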
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox_japanese'] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 192 | 0 |
"""simple docstring"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def _snake_case ( ):
_lowerCamelCase : str = input('Enter message: ' )
_lowerCamelCase : Optional[int] = input('Enter key [alphanumeric]: ' )
_lowerCamelCase : Dict = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
_lowerCamelCase : Union[str, Any] = 'encrypt'
_lowerCamelCase : List[Any] = encrypt_message(lowercase__ , lowercase__ )
elif mode.lower().startswith('d' ):
_lowerCamelCase : List[str] = 'decrypt'
_lowerCamelCase : Any = decrypt_message(lowercase__ , lowercase__ )
print(f'''\n{mode.title()}ed message:''' )
print(lowercase__ )
def _snake_case ( lowercase__ , lowercase__ ):
return translate_message(lowercase__ , lowercase__ , 'encrypt' )
def _snake_case ( lowercase__ , lowercase__ ):
return translate_message(lowercase__ , lowercase__ , 'decrypt' )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : List[Any] = key.upper()
for symbol in message:
_lowerCamelCase : Optional[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowercase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowercase__ ):
_lowerCamelCase : Optional[Any] = 0
else:
translated.append(lowercase__ )
return "".join(lowercase__ )
if __name__ == "__main__":
main()
| 367 |
"""simple docstring"""
import string
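# Letter-frequency analysis helpers: order the alphabet by observed frequency in a
# message and score (0-12) how closely that ordering matches English's ETAOIN order.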
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x ):
    return x[0]
def get_frequency_order(message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # letters with equal frequency are ordered by their position in ETAOIN
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score(message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 12 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 46 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig( PretrainedConfig ):
    model_type = 'longformer'
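    # attention_window controls the size of the sliding local-attention window around each token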
    def __init__( self , attention_window = 512 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 30_522 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3_072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 512 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1e-12 , onnx_export = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["""pooler_output"""] = {0: """batch"""}
        return outputs
@property
    def atol_for_validation( self ) -> float:
return 1e-4
@property
    def default_onnx_opset( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        inputs["""global_attention_mask"""][:, ::2] = 1
        return inputs
| 46 | 1 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest( TestCase ):
    '''simple docstring'''
    def setUp(self) -> List[str]:
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
    def tearDown(self) -> Tuple:
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self) -> Optional[Any]:
        save_dir = os.path.join(self.tmpdirname , "rag_tokenizer" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer(self) -> List[Any]:
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer(self) -> Any:
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 36 |
lowercase : Optional[int] = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 36 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None ) -> List[Any]:
    '''Creates a random integer tensor of the given shape within the vocab size.'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0, vocab_size - 1 ) )
    output = np.array(values, dtype=jnp.intaa ).reshape(shape )
    return output
def random_attention_mask(shape, rng=None ) -> List[Any]:
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
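    # Mixin shared by the Flax generation test suites; concrete test classes are
    # expected to set model_tester and all_generative_model_classes.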
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config( self ):
        config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['''input_ids'''].shape[-1] // 2
        input_ids = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids )
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A_ ( self : int ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
snake_case_ = 0
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(lowercase_ , lowercase_ )
snake_case_ = pt_model_class(lowercase_ ).eval()
snake_case_ = load_flax_weights_in_pytorch_model(lowercase_ , flax_model.params )
snake_case_ = flax_model.generate(lowercase_ ).sequences
snake_case_ = pt_model.generate(torch.tensor(lowercase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def A_ ( self : Dict ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : Optional[int] ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = True
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : Optional[Any] ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
snake_case_ = 2
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : Optional[int] ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
snake_case_ = 2
snake_case_ = 2
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def A_ ( self : Union[str, Any] ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = True
snake_case_ = max_length
snake_case_ = 0.8
snake_case_ = 10
snake_case_ = 0.3
snake_case_ = 1
snake_case_ = 8
snake_case_ = 9
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : Tuple ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = max_length
snake_case_ = 1
snake_case_ = 8
snake_case_ = 9
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : Dict ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
snake_case_ = max_length
snake_case_ = 2
snake_case_ = 1
snake_case_ = 8
snake_case_ = 9
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : int ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ = attention_mask.at[(0, 0)].set(0 )
snake_case_ = False
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ , attention_mask=lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ , attention_mask=lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : Union[str, Any] ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ = attention_mask.at[(0, 0)].set(0 )
snake_case_ = True
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ , attention_mask=lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ , attention_mask=lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self : str ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ = attention_mask.at[(0, 0)].set(0 )
snake_case_ = 2
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = model.generate(lowercase_ , attention_mask=lowercase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(lowercase_ , attention_mask=lowercase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests( unittest.TestCase ):
    def test_validate_generation_inputs( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        encoder_input_str = '''Hello world'''
        input_ids = tokenizer(encoder_input_str , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , '''do_samples''' ):
            model.generate(input_ids , do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , '''foo''' ):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids , **fake_model_kwargs )
| 56 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )
        # test with a raw waveform
        audio = np.zeros((34000,) )
        audioa = np.zeros((14000,) )
        return audio_classifier, [audioa, audio]
    def run_pipeline_test( self , audio_classifier , examples ):
        audioa , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
        output = audio_classifier(audio , top_k=1 )
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio( self , audio_classifier ):
        import datasets
        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio )
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
@require_torch
    def test_small_model_pt( self ):
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''' , model=model )
        audio = np.ones((8000,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
{'''score''': 0.0_8_4_2, '''label''': '''no'''},
{'''score''': 0.0_8_3_8, '''label''': '''up'''},
{'''score''': 0.0_8_3_7, '''label''': '''go'''},
{'''score''': 0.0_8_3_4, '''label''': '''right'''},
]
        EXPECTED_OUTPUT_PT_2 = [
{'''score''': 0.0_8_4_5, '''label''': '''stop'''},
{'''score''': 0.0_8_4_4, '''label''': '''on'''},
{'''score''': 0.0_8_4_1, '''label''': '''right'''},
{'''score''': 0.0_8_3_4, '''label''': '''left'''},
]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
    def test_large_model_pt( self ):
        import datasets
        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''' , model=model )
        dataset = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
        audio = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
        output = audio_classifier(audio , top_k=4 )
        self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
{'''score''': 0.9_8_1, '''label''': '''go'''},
{'''score''': 0.0_0_7, '''label''': '''up'''},
{'''score''': 0.0_0_6, '''label''': '''_unknown_'''},
{'''score''': 0.0_0_1, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def _lowerCamelCase ( self ):
        pass
| 160 | 0 |
"""simple docstring"""
from __future__ import annotations
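# Slowsort: the deliberately pessimal "multiply and surrender" sorting algorithm
# of Broder and Stolfi; correct and in-place, but super-polynomially slow.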
def slowsort(sequence : list , start : int | None = None , end : int | None = None )-> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    # after sorting both halves, move the larger of the two maxima to the end
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid], sequence[end]
    # recurse on everything except the maximum, which is now in its final place
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'A configuration of type {self.model_type} cannot be instantiated because '
                F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
    @property
    def outputs( self ):
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids' )
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask' )
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class __a ( lowerCAmelCase__ ):
@property
def snake_case_ ( self ):
pass
def snake_case_ ( self , a__ ):
return VisionEncoderDecoderEncoderOnnxConfig(a__ )
def snake_case_ ( self , a__ , a__ , a__ = "default" ):
_lowerCamelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(a__ , a__ )
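# Hedged usage sketch (not part of the original module): composing a config
# from standard encoder/decoder configs via the classmethod defined above.
if __name__ == "__main__":
    from transformers import BertConfig, ViTConfig

    demo_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
    assert demo_config.decoder.is_decoder and demo_config.decoder.add_cross_attention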
| 80 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer

TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
    def test_graph_mode(self):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model(self):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config(self):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding(self):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
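# Hedged usage sketch of the in-graph tokenizer exercised above (added for
# illustration; requires TF and keras-nlp, and downloads the "gpt2" checkpoint):
if __name__ == "__main__":
    demo_tokenizer = TFGPT2Tokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
    demo_ids = tf.function(demo_tokenizer)(tf.constant(["hello world"]))["input_ids"]
    print(demo_ids)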
| 110 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
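# Hedged one-off sketch of the helper tested above (added for illustration;
# run from the repository root so the relative test path resolves):
if __name__ == "__main__":
    demo_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
    print(get_test_info.to_json(demo_mapping))  # {"BertModelTest": "BertModelTester"}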
| 98 | 0 |
"""Count primes below a limit that are the difference of two consecutive cubes,
i.e. of the form (k + 1)**3 - k**3 = 3*k*k + 3*k + 1."""
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if `number` has no divisor in [2, isqrt(number)]."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the consecutive-cube differences below `max_prime` that are prime."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first difference of consecutive cubes
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next difference grows by 6 * (k + 1)
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
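# Sanity-check sketch (added for illustration): the candidates stepped through
# above are exactly the consecutive-cube differences 3*k*k + 3*k + 1.
if __name__ == "__main__":
    for k in range(1, 6):
        assert (k + 1) ** 3 - k**3 == 3 * k * k + 3 * k + 1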
| 98 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase = get_tests_dir("fixtures")
_lowerCAmelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_lowerCAmelCase = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
def snake_case__ ( self : Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a__ ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a__ ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ , trust_remote_code=a__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : int ):
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = True
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# If remote code is not set, the default is to use local
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(a__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
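# Minimal registration sketch mirroring the tests above (added for
# illustration; the class names are hypothetical placeholders):
if __name__ == "__main__":
    from transformers import PretrainedConfig
    from transformers.feature_extraction_utils import FeatureExtractionMixin

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyFeatureExtractor(FeatureExtractionMixin):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)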
| 98 | 1 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """TVLT audio feature extractor: converts raw speech into padded log-mel
    spectrogram patches plus a matching attention mask."""

    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the normalized log-mel spectrogram of one waveform."""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = True , sampling_rate: Optional[int] = None , resample: bool = False , mask_audio: bool = False , **kwargs , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
# Create audio attention mask
        max_patch_len = max(
[ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
# return as BatchFeature
if return_attention_mask:
__lowerCAmelCase : Union[str, Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
__lowerCAmelCase : Tuple = {"audio_values": padded_audio_features}
__lowerCAmelCase : Any = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase)
        return encoded_inputs
| 269 |
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: `frames` is a list of NumPy
    video frames or a torch tensor."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
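# Hedged usage sketch (not part of the original __init__; the checkpoint is
# the public damo-vilab text-to-video model, and a GPU is assumed):
#
#     import torch
#     from diffusers import TextToVideoSDPipeline
#
#     pipe = TextToVideoSDPipeline.from_pretrained(
#         "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#     ).to("cuda")
#     frames = pipe("a panda eating bamboo").frames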
| 346 | 0 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'


def update_version_in_file(fname, version, pattern):
    """Update `version` in `fname`, using the regex registered for `pattern`."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned by every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point the README model list at the stable docs instead of `main`."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc', 'https://huggingface.co/docs/diffusers/model_doc', )
        index += 1

    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump the version for a release and ask the user to confirm it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""

    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""")
    if len(version) == 0:
        version = default_version
    print(F"""Updating version to {version}.""")
    global_version_update(version, patch=patch)


def post_release_work():
    """Bump the version to the next dev version after a release."""
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""")
    if len(version) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
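# Quick illustration of the "init" pattern above on a sample source string
# (added for clarity; not part of the release script):
#
#     sample = '__version__ = "0.18.0.dev0"\n'
#     print(REPLACE_PATTERNS["init"][0].sub('__version__ = "0.18.0"\n', sample))
#     # -> __version__ = "0.18.0"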
| 207 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ''
else:
__lowerCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__lowerCAmelCase = True
__lowerCAmelCase = int(vit_name[-12:-10] )
__lowerCAmelCase = int(vit_name[-9:-6] )
else:
__lowerCAmelCase = 1000
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = int(vit_name[-6:-4] )
__lowerCAmelCase = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__lowerCAmelCase = 192
__lowerCAmelCase = 768
__lowerCAmelCase = 12
__lowerCAmelCase = 3
elif vit_name[9:].startswith('small' ):
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__lowerCAmelCase = 768
__lowerCAmelCase = 2304
__lowerCAmelCase = 8
__lowerCAmelCase = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif vit_name[4:].startswith('huge' ):
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
# load original model from timm
__lowerCAmelCase = timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_, lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__lowerCAmelCase = ViTModel(lowerCAmelCase_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__lowerCAmelCase = DeiTImageProcessor(size=config.image_size )
else:
__lowerCAmelCase = ViTImageProcessor(size=config.image_size )
__lowerCAmelCase = image_processor(images=prepare_img(), return_tensors='pt' )
__lowerCAmelCase = encoding['pixel_values']
__lowerCAmelCase = model(lowerCAmelCase_ )
if base_model:
__lowerCAmelCase = timm_model.forward_features(lowerCAmelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase_, outputs.pooler_output, atol=1E-3 )
else:
__lowerCAmelCase = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_, outputs.logits, atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
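# Example invocation of this conversion script (added for illustration; the
# output directory is hypothetical):
#
#     python convert_vit_timm_to_pytorch.py \
#         --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224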
| 207 | 1 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=F'`{prop}` does not exist')

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=F'`{name} value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=F'`{name} value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'config.json')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = 'test'

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values])
            raise ValueError(F'The following keys were not properly set in the config:\n{errors}')
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
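# Hedged usage sketch (mirrors how model test suites typically drive this
# helper; BertConfig and hidden_size=37 are illustrative choices):
if __name__ == "__main__":
    import unittest

    from transformers import BertConfig

    class BertConfigTest(unittest.TestCase):
        def test_config(self):
            ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()

    unittest.main()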
| 113 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = '''UperNetConfig'''
class UperNetConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation, )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One pooling branch of the Pyramid Pooling Module."""

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) used in PSPNet."""

    def __init__(self, pool_scales, in_channels, channels, align_corners) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """UperNet decode head: a PSP module on the last backbone feature map plus
    a Feature Pyramid Network over all of them."""

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1, )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1, )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """Fully Convolutional Network head, used here as the auxiliary head on an
    intermediate backbone feature map."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple
    interface for downloading and loading pretrained models."""

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", UPERNET_START_DOCSTRING, )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one')
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
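# Hedged inference sketch (the checkpoint is the one listed at the top of
# this file; the local image path is hypothetical):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#     logits = model(**inputs).logits  # (batch, num_labels, height, width)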
| 113 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Load a TF LXMERT checkpoint into a PyTorch model and save the state dict."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
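# Example invocation (added for illustration; all paths hypothetical):
#
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#         --config_file ./lxmert_tf/config.json \
#         --pytorch_dump_path ./lxmert_pytorch.bin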
| 213 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
snake_case = MultilingualCLIP(__snake_case )
snake_case = text_encoder.eval()
return text_encoder
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def a_ ( self ):
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def a_ ( self , __snake_case , __snake_case=0 ):
snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case = '''A red cartoon frog, 4k'''
snake_case = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
snake_case = pipeline(
__snake_case , image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
snake_case = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
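# --- Hedged sketch of the two-stage flow exercised above (added by the
# editor; model ids are the ones used in the slow test, everything else
# is illustrative) ---
# prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
# image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
# pipe = KandinskyImgaImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
# out = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#            negative_image_embeds=negative_image_embeds, strength=0.2, output_type="np")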
| 213 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
A_ :int = logging.get_logger(__name__) # pylint: disable=invalid-name
A_ :Dict = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __A ( lowercase_ ):
"""simple docstring"""
UpperCamelCase__ : Union[PIL.Image.Image, np.ndarray]
class __A ( lowercase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=lowerCamelCase__ , image_encoder=lowerCamelCase__ , image_processor=lowerCamelCase__ , scheduler=lowerCamelCase__ , renderer=lowerCamelCase__ , )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if latents is None:
__UpperCamelCase : Tuple =randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__UpperCamelCase : List[str] =latents.to(lowerCamelCase__ )
__UpperCamelCase : Dict =latents * scheduler.init_noise_sigma
return latents
def __lowercase ( self , lowerCamelCase__=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__UpperCamelCase : Dict =torch.device(f'cuda:{gpu_id}' )
__UpperCamelCase : int =[self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__ , lowerCamelCase__ )
@property
def __lowercase ( self ):
"""simple docstring"""
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCamelCase__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(image[0] , torch.Tensor ):
__UpperCamelCase : Optional[Any] =torch.cat(lowerCamelCase__ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCamelCase__ , axis=0 )
if not isinstance(lowerCamelCase__ , torch.Tensor ):
__UpperCamelCase : List[Any] =self.image_processor(lowerCamelCase__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
__UpperCamelCase : Any =image.to(dtype=self.image_encoder.dtype , device=lowerCamelCase__ )
__UpperCamelCase : List[str] =self.image_encoder(lowerCamelCase__ )['last_hidden_state']
__UpperCamelCase : Any =image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__UpperCamelCase : Dict =image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
if do_classifier_free_guidance:
__UpperCamelCase : Union[str, Any] =torch.zeros_like(lowerCamelCase__ )
# For classifier-free guidance we need both an unconditional and a
# conditional prediction; concatenating the unconditional and image
# embeddings into a single batch lets one forward pass cover both.
__UpperCamelCase : List[Any] =torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = 25 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 4.0 , lowerCamelCase__ = 64 , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , PIL.Image.Image ):
__UpperCamelCase : Any =1
elif isinstance(lowerCamelCase__ , torch.Tensor ):
__UpperCamelCase : str =image.shape[0]
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__UpperCamelCase : Union[str, Any] =len(lowerCamelCase__ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCamelCase__ )}' )
__UpperCamelCase : Tuple =self._execution_device
__UpperCamelCase : List[str] =batch_size * num_images_per_prompt
__UpperCamelCase : Optional[Any] =guidance_scale > 1.0
__UpperCamelCase : List[Any] =self._encode_image(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# prior
self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__ )
__UpperCamelCase : List[str] =self.scheduler.timesteps
__UpperCamelCase : Optional[int] =self.prior.config.num_embeddings
__UpperCamelCase : Tuple =self.prior.config.embedding_dim
__UpperCamelCase : Optional[Any] =self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.scheduler , )
# YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
__UpperCamelCase : Tuple =latents.reshape(latents.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__UpperCamelCase : List[str] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__UpperCamelCase : List[str] =self.scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : int =self.prior(
lowerCamelCase__ , timestep=lowerCamelCase__ , proj_embedding=lowerCamelCase__ , ).predicted_image_embedding
# remove the variance
__UpperCamelCase , __UpperCamelCase : Tuple =noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:  # a bool (guidance_scale > 1.0), so test truthiness rather than `is not None`
__UpperCamelCase , __UpperCamelCase : List[str] =noise_pred.chunk(2 )
__UpperCamelCase : Optional[Any] =noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
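# Classifier-free guidance, numerically (illustrative values added by the
# editor): with guidance_scale = 4.0, uncond = 0.1 and cond = 0.3 this gives
# 0.1 + 4.0 * (0.3 - 0.1) = 0.9, i.e. the conditional direction is
# amplified relative to the unconditional prediction.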
__UpperCamelCase : Tuple =self.scheduler.step(
lowerCamelCase__ , timestep=lowerCamelCase__ , sample=lowerCamelCase__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCamelCase__ )
__UpperCamelCase : List[Any] =[]
for i, latent in enumerate(lowerCamelCase__ ):
__UpperCamelCase : int =self.renderer.decode(
latent[None, :] , lowerCamelCase__ , size=lowerCamelCase__ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowerCamelCase__ )
__UpperCamelCase : Tuple =torch.stack(lowerCamelCase__ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
__UpperCamelCase : str =images.cpu().numpy()
if output_type == "pil":
__UpperCamelCase : Optional[int] =[self.numpy_to_pil(lowerCamelCase__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCamelCase__ )
| 71 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase : str = Mapping[str, np.ndarray]
lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict.
lowerCamelCase : Any = 0.0_1
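# The 0.01 above (PICO_TO_ANGSTROM upstream) converts picometers to
# angstroms (1 A = 100 pm); ProteinNet tertiary coordinates are stored
# in picometers.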
@dataclasses.dataclass(frozen=lowercase_ )
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in squared angstroms),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowerCAmelCase__ : Optional[Sequence[int]] = None
def _SCREAMING_SNAKE_CASE (A ) -> Protein:
"""simple docstring"""
lowercase__ = R'''(\[[A-Z]+\]\n)'''
lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0]
lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
lowercase__ = ["N", "CA", "C"]
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(A ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = '''X''' # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(A , g[1][axis].split() ) ) )
lowercase__ = np.array(A )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , )
def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(A , A ) if i == chain_id]
if parents is None or len(A ) == 0:
lowercase__ = ['''N/A''']
pdb_headers.append(f"PARENT {' '.join(A )}" )
return pdb_headers
def _SCREAMING_SNAKE_CASE (A , A ) -> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = pdb_str.split('''\n''' )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(A ) , [] )
parent_dict[str(A )].append(A )
lowercase__ = max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] )
parents_per_chain.append(A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [['''N/A''']]
def make_parent_line(A ) -> str:
return f"PARENT {' '.join(A )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(A ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ['''N/A''']
out_pdb_lines.append(make_parent_line(A ) )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (A ) -> str:
"""simple docstring"""
lowercase__ = residue_constants.restypes + ['''X''']
def res_atoa(A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
lowercase__ = get_pdb_headers(A )
if len(A ) > 0:
pdb_lines.extend(A )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(A ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = '''ATOM'''
lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}"
lowercase__ = ''''''
lowercase__ = ''''''
lowercase__ = 1.00
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, so taking the first character works.
lowercase__ = ''''''
lowercase__ = '''A'''
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
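# An illustrative record this template produces (spacing approximate,
# added by the editor):
#   "ATOM      1  N   MET A   1      11.104   6.134  -6.504  1.00  0.00           N"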
lowercase__ = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(A )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = '''TER'''
lowercase__ = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(A , A ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(A )
def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
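# --- Hedged sketch (added by the editor): the module's round trip.
# Every function above shares an obfuscated name in this dump; upstream
# they appear as from_proteinnet_string and to_pdb. Paths are hypothetical.
# prot = from_proteinnet_string(open("sample.proteinnet").read())
# open("sample.pdb", "w").write(to_pdb(prot))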
| 2 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __snake_case ( _SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = MobileBertTokenizer
lowercase = MobileBertTokenizerFast
lowercase = True
lowercase = True
lowercase = filter_non_english
lowercase = 'google/mobilebert-uncased'
def __lowercase ( self : Any ) -> List[str]:
super().setUp()
lowerCAmelCase_ : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCAmelCase_ : Any = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Any ) -> Optional[int]:
lowerCAmelCase_ : int = """UNwant\u00E9d,running"""
lowerCAmelCase_ : str = """unwanted, running"""
return input_text, output_text
def __lowercase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase_ : int = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def __lowercase ( self : Optional[int] ) -> Any:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : int = """UNwant\u00E9d,running"""
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize(lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[int] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : Dict = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Any = self.get_rust_tokenizer()
lowerCAmelCase_ : Tuple = tokenizer.encode(lowerCamelCase )
lowerCAmelCase_ : Tuple = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# With lower casing
lowerCAmelCase_ : List[str] = self.get_tokenizer(do_lower_case=lowerCamelCase )
lowerCAmelCase_ : List[str] = self.get_rust_tokenizer(do_lower_case=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = """UNwant\u00E9d,running"""
lowerCAmelCase_ : Dict = tokenizer.tokenize(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : int = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : Dict = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : int = self.get_rust_tokenizer()
lowerCAmelCase_ : Any = tokenizer.encode(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase_ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __lowercase ( self : int ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowercase ( self : Any ) -> List[Any]:
lowerCAmelCase_ : int = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __lowercase ( self : int ) -> str:
lowerCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowercase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowercase ( self : List[str] ) -> int:
lowerCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowercase ( self : Optional[int] ) -> List[Any]:
lowerCAmelCase_ : Dict = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowercase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowercase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCamelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __lowercase ( self : Optional[Any] ) -> int:
lowerCAmelCase_ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowerCAmelCase_ : List[Any] = {}
for i, token in enumerate(lowerCamelCase ):
lowerCAmelCase_ : int = i
lowerCAmelCase_ : List[str] = WordpieceTokenizer(vocab=lowerCamelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __lowercase ( self : List[Any] ) -> Any:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __lowercase ( self : Optional[Any] ) -> List[Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __lowercase ( self : Tuple ) -> List[str]:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __lowercase ( self : Tuple ) -> int:
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __lowercase ( self : Dict ) -> str:
lowerCAmelCase_ : str = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
lowerCAmelCase_ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def __lowercase ( self : List[Any] ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Any = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCAmelCase_ : Optional[Any] = tokenizer_r.encode_plus(
lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase , )
lowerCAmelCase_ : str = tokenizer_r.do_lower_case if hasattr(lowerCamelCase , """do_lower_case""" ) else False
lowerCAmelCase_ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __lowercase ( self : int ) -> Tuple:
lowerCAmelCase_ : Tuple = ["""的""", """人""", """有"""]
lowerCAmelCase_ : List[Any] = """""".join(lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : str = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : Dict = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : str = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
lowerCAmelCase_ : int = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : str = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : Optional[int] = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowerCAmelCase_ : List[Any] = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ : List[str] = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(lowerCamelCase )
]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
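# --- Hedged sketch (added by the editor, not part of the original tests) ---
# Greedy longest-match-first WordPiece, the algorithm the tests above
# exercise. It runs per whitespace-split word; `vocab` is any container
# supporting `in`.
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub  # continuation pieces carry the "##" prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word maps to [UNK]
        pieces.append(cur)
        start = end
    return pieces

# _wordpiece_sketch("unwanted", {"un", "##want", "##ed"})   -> ["un", "##want", "##ed"]
# _wordpiece_sketch("unwantedX", {"un", "##want", "##ed"})  -> ["[UNK]"]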
| 89 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : int = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'efficientformer'
def __init__( self : Any , lowerCamelCase : List[int] = [3, 2, 6, 4] , lowerCamelCase : List[int] = [48, 96, 2_24, 4_48] , lowerCamelCase : List[bool] = [True, True, True, True] , lowerCamelCase : int = 4_48 , lowerCamelCase : int = 32 , lowerCamelCase : int = 4 , lowerCamelCase : int = 7 , lowerCamelCase : int = 5 , lowerCamelCase : int = 8 , lowerCamelCase : int = 4 , lowerCamelCase : float = 0.0 , lowerCamelCase : int = 16 , lowerCamelCase : int = 3 , lowerCamelCase : int = 3 , lowerCamelCase : int = 3 , lowerCamelCase : int = 2 , lowerCamelCase : int = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : int = 1 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , lowerCamelCase : float = 1E-5 , lowerCamelCase : str = "gelu" , lowerCamelCase : float = 0.02 , lowerCamelCase : float = 1E-12 , lowerCamelCase : int = 2_24 , lowerCamelCase : float = 1E-05 , **lowerCamelCase : int , ) -> None:
super().__init__(**lowerCamelCase )
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = hidden_sizes
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : int = patch_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : int = mlp_expansion_ratio
lowerCAmelCase_ : Optional[Any] = downsamples
lowerCAmelCase_ : Union[str, Any] = dim
lowerCAmelCase_ : Union[str, Any] = key_dim
lowerCAmelCase_ : str = attention_ratio
lowerCAmelCase_ : Tuple = resolution
lowerCAmelCase_ : Optional[Any] = pool_size
lowerCAmelCase_ : str = downsample_patch_size
lowerCAmelCase_ : Dict = downsample_stride
lowerCAmelCase_ : str = downsample_pad
lowerCAmelCase_ : str = drop_path_rate
lowerCAmelCase_ : List[Any] = num_metaad_blocks
lowerCAmelCase_ : Tuple = distillation
lowerCAmelCase_ : Optional[Any] = use_layer_scale
lowerCAmelCase_ : Dict = layer_scale_init_value
lowerCAmelCase_ : Optional[Any] = image_size
lowerCAmelCase_ : Optional[Any] = batch_norm_eps
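# --- Hedged usage sketch (added by the editor; the config class above is
# obfuscated in this dump, upstream it is EfficientFormerConfig) ---
# config = EfficientFormerConfig(
#     depths=[3, 2, 6, 4],              # blocks per stage (the L1 defaults above)
#     hidden_sizes=[48, 96, 224, 448],  # channel width per stage
# )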
| 89 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( lowercase : Namespace ) -> str:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__snake_case = '''
Transformers can only be used from the command line to convert TensorFlow models to PyTorch; in that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _lowerCAmelCase ( a__ ):
@staticmethod
def lowerCamelCase ( UpperCamelCase__ ) -> int:
'''simple docstring'''
snake_case : Dict = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="Model\'s type." )
train_parser.add_argument(
"--tf_checkpoint" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=UpperCamelCase_ , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = logging.get_logger("transformers-cli/converting" )
self._logger.info(F'Loading model {model_type}' )
snake_case : Optional[int] = model_type
snake_case : Optional[Any] = tf_checkpoint
snake_case : List[Any] = pytorch_dump_output
snake_case : Optional[int] = config
snake_case : Any = finetuning_task_name
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
if "ckpt" in self._tf_checkpoint.lower():
snake_case : Union[str, Any] = self._tf_checkpoint
snake_case : List[str] = ''''''
else:
snake_case : Dict = self._tf_checkpoint
snake_case : Any = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCamelCase_ , self._config , self._pytorch_dump_output , UpperCamelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 203 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : int , UpperCamelCase_ : VQModel , UpperCamelCase_ : UNetaDModel , UpperCamelCase_ : DDIMScheduler ):
super().__init__()
self.register_modules(vqvae=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Optional[int] , ):
lowerCAmelCase : Dict = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : List[str] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCamelCase_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCAmelCase : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : List[str] = {}
if accepts_eta:
lowerCAmelCase : List[Any] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCAmelCase : List[str] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
lowerCAmelCase : Tuple = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : Optional[Any] = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
# decode the image latents with the VQ-VAE
lowerCAmelCase : Dict = self.vqvae.decode(UpperCamelCase_ ).sample
lowerCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : List[str] = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
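# --- Hedged usage sketch (added by the editor; the class above is the
# unconditional latent-diffusion pipeline, its name obfuscated in this
# dump, and the repo id below is illustrative) ---
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images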
| 60 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
lowerCamelCase_ = 0
lowerCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
lowerCamelCase_ = tuple[int, int]
class UpperCamelCase_ :
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None , ) -> Any:
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : Optional[int] = pos_y
UpperCAmelCase_ : Optional[Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Optional[Any] = goal_y
UpperCAmelCase_ : Union[str, Any] = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[int] = self.calculate_heuristic()
UpperCAmelCase_ : Tuple = self.g_cost + self.h_cost
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.pos_x - self.goal_x
UpperCAmelCase_ : Dict = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase_ ) + abs(lowerCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
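# Worked example (added by the editor): from (0, 0) toward a goal at
# (6, 6), dx = dy = -6, so the Manhattan distance is |-6| + |-6| = 12
# while the Euclidean distance is sqrt(36 + 36) ~= 8.49. Both are
# admissible on this 4-connected grid, and Manhattan dominates Euclidean,
# so HEURISTIC = 1 typically expands fewer nodes.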
def __lt__( self : Optional[Any] , lowerCAmelCase_ : Node ) -> str:
return self.f_cost < other.f_cost
class UpperCamelCase_ :
def __init__( self : Tuple , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> str:
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )  # the start node has no parent
UpperCAmelCase_ : Union[str, Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
UpperCAmelCase_ : int = [self.start]
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[str] = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Dict = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
return [self.start.pos]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = []
for action in delta:
UpperCAmelCase_ : Any = parent.pos_x + action[1]
UpperCAmelCase_ : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Node | None ) -> int:
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Dict = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase_ :
def __init__( self : int , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : str = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : Dict = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase_ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = current_bwd_node
UpperCAmelCase_ : Tuple = current_fwd_node
UpperCAmelCase_ : Optional[Any] = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Tuple = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase_ )
else:
astar.open_nodes.append(lowerCAmelCase_ )
return [self.fwd_astar.start.pos]
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Dict:
UpperCAmelCase_ : Tuple = self.fwd_astar.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = self.bwd_astar.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowerCamelCase_ = (0, 0)
lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase_ = time.time()
lowerCamelCase_ = AStar(init, goal)
lowerCamelCase_ = a_star.search()
lowerCamelCase_ = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
lowerCamelCase_ = time.time()
lowerCamelCase_ = BidirectionalAStar(init, goal)
lowerCamelCase_ = bd_astar.search()  # the search call was missing; this mirrors the AStar block above
lowerCamelCase_ = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 353 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ (__A ):
__magic_name__ = ['''pixel_values''']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 255 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : Any = size if size is not None else {"shortest_edge": 256}
UpperCAmelCase_ : List[str] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" )
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : int = size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : Tuple = do_center_crop
UpperCAmelCase_ : Any = crop_size
UpperCAmelCase_ : List[str] = do_rescale
UpperCAmelCase_ : Dict = rescale_factor
UpperCAmelCase_ : str = do_normalize
UpperCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Any , ) -> np.ndarray:
UpperCAmelCase_ : Any = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ : List[Any] = get_resize_output_image_size(lowerCAmelCase_ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
UpperCAmelCase_ : List[str] = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Tuple , ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : List[Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" )
UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[Any] = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[Any] = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : Union[str, Any] = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : Tuple = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : Any = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
UpperCAmelCase_ : Any = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
UpperCAmelCase_ : int = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Tuple] = None ) -> Optional[int]:
UpperCAmelCase_ : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = target_sizes.numpy()
UpperCAmelCase_ : Dict = []
for idx in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCAmelCase_ )
UpperCAmelCase_ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
UpperCAmelCase_ : Tuple = logits.argmax(dim=1 )
UpperCAmelCase_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 253 | 0 |
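The post-processing method at the end of the sample above resizes per-image logits and takes a channel-wise argmax; a minimal standalone sketch of that step (tensor shapes and label count are illustrative assumptions, not values from the sample):

import torch

logits = torch.randn(2, 19, 128, 128)           # (batch, num_labels, h, w), assumed shapes
target_sizes = [(512, 512), (480, 640)]

semantic_maps = []
for idx, size in enumerate(target_sizes):
    # upsample one image's logits to its requested output size
    resized = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(0), size=size, mode="bilinear", align_corners=False
    )
    # class id per pixel
    semantic_maps.append(resized[0].argmax(dim=0))

print([tuple(m.shape) for m in semantic_maps])  # [(512, 512), (480, 640)]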
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'vision-encoder-decoder'
lowerCamelCase__ = True
def __init__( self, **__a):
'''simple docstring'''
super().__init__(**__a)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
_lowerCAmelCase : str = kwargs.pop("encoder")
_lowerCAmelCase : Any = encoder_config.pop("model_type")
_lowerCAmelCase : str = kwargs.pop("decoder")
_lowerCAmelCase : List[str] = decoder_config.pop("model_type")
_lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Optional[int] = True
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : str = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[str] = self.encoder.to_dict()
_lowerCAmelCase : List[str] = self.decoder.to_dict()
_lowerCAmelCase : Any = self.__class__.model_type
return output
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
'''simple docstring'''
import torch
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase : List[str] = super().generate_dummy_inputs(
__a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape
_lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowerCAmelCase : List[str] = dummy_input.pop("input_ids")
_lowerCAmelCase : List[str] = dummy_input.pop("attention_mask")
_lowerCAmelCase : Optional[int] = torch.zeros(__a)
return common_inputs
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self, __a):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(__a)
def snake_case__ ( self, __a, __a, __a = "default"):
'''simple docstring'''
_lowerCAmelCase : Dict = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
| 36 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'vision-encoder-decoder'
lowerCamelCase__ = True
def __init__( self, **__a):
'''simple docstring'''
super().__init__(**__a)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
_lowerCAmelCase : str = kwargs.pop("encoder")
_lowerCAmelCase : Any = encoder_config.pop("model_type")
_lowerCAmelCase : str = kwargs.pop("decoder")
_lowerCAmelCase : List[str] = decoder_config.pop("model_type")
_lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Optional[int] = True
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : str = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[str] = self.encoder.to_dict()
_lowerCAmelCase : List[str] = self.decoder.to_dict()
_lowerCAmelCase : Any = self.__class__.model_type
return output
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
'''simple docstring'''
import torch
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase : List[str] = super().generate_dummy_inputs(
__a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape
_lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowerCAmelCase : List[str] = dummy_input.pop("input_ids")
_lowerCAmelCase : List[str] = dummy_input.pop("attention_mask")
_lowerCAmelCase : Optional[int] = torch.zeros(__a)
return common_inputs
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self, __a):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(__a)
def snake_case__ ( self, __a, __a, __a = "default"):
'''simple docstring'''
_lowerCAmelCase : Dict = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
| 36 | 1 |
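Assuming the sample above is the obfuscated `VisionEncoderDecoderConfig` from transformers, a short usage sketch of the composite-config pattern it implements:

from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()   # any vision encoder config
decoder_config = GPT2Config()  # any text decoder config

# Mirrors the classmethod above: marks the decoder as a decoder and enables
# cross-attention before wrapping both sub-configs.
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True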
from math import loga
def __lowerCamelCase ( _lowercase ) -> int:
    if _lowercase < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_lowercase , float ):
        raise TypeError("""Input value must be an 'int' type""" )
    return 0 if (_lowercase == 0) else int(loga(_lowercase & -_lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def __lowerCamelCase ( _lowercase ) -> List[Any]:
for i in range(0 , _lowercase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def __lowerCamelCase ( _lowercase ) -> Dict:
for i in range(_lowercase , 0 , -1 ):
for _ in range(_lowercase , 0 , -1 ): # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def __lowerCamelCase ( _lowercase ) -> List[Any]:
if n <= 0:
print(""" ... .... nothing printing :(""" )
return
floyd(_lowercase ) # upper half
reverse_floyd(_lowercase ) # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
a : List[Any] = 1
while K:
a : int = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
a : Tuple = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 338 | 0 |
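A compact equivalent of the diamond printer above, folding the upper and lower halves into one loop and matching the `pretty_print` entry point the sample's driver code calls:

def pretty_print(n: int) -> None:
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    # rows grow 1..n then shrink back to 1; pad each row to center it
    for row in list(range(1, n + 1)) + list(range(n - 1, 0, -1)):
        print(" " * (n - row) + "* " * row)

pretty_print(3)  # prints a 5-line diamond of stars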
import comet # From: unbabel-comet
import torch
import datasets
_lowerCAmelCase : Optional[int] = datasets.logging.get_logger(__name__)
_lowerCAmelCase : Any = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_lowerCAmelCase : Any = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_lowerCAmelCase : List[Any] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'sources': datasets.Value('string' , id='sequence' ),
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] , )
def __magic_name__ ( self , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name == "default":
__a =comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
__a =comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=False ) -> Any:
'''simple docstring'''
if gpus is None:
__a =1 if torch.cuda.is_available() else 0
__a ={'src': sources, 'mt': predictions, 'ref': references}
__a =[dict(zip(__snake_case , __snake_case ) ) for t in zip(*data.values() )]
__a , __a =self.scorer.predict(__snake_case , gpus=__snake_case , progress_bar=__snake_case )
return {"mean_score": mean_score, "scores": scores}
| 218 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase_( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , _snake_case , )
if isinstance(_snake_case , torch.Tensor ):
return image
elif isinstance(_snake_case , PIL.Image.Image ):
__a =[image]
if isinstance(image[0] , PIL.Image.Image ):
__a , __a =image[0].size
__a , __a =(x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__a =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
__a =np.concatenate(_snake_case , axis=0 )
__a =np.array(_snake_case ).astype(np.floataa ) / 255.0
__a =image.transpose(0 , 3 , 1 , 2 )
__a =2.0 * image - 1.0
__a =torch.from_numpy(_snake_case )
elif isinstance(image[0] , torch.Tensor ):
__a =torch.cat(_snake_case , dim=0 )
return image
def UpperCamelCase_( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
if isinstance(_snake_case , torch.Tensor ):
return mask
elif isinstance(_snake_case , PIL.Image.Image ):
__a =[mask]
if isinstance(mask[0] , PIL.Image.Image ):
__a , __a =mask[0].size
__a , __a =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a =[np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
__a =np.concatenate(_snake_case , axis=0 )
__a =mask.astype(np.floataa ) / 255.0
__a =0
__a =1
__a =torch.from_numpy(_snake_case )
elif isinstance(mask[0] , torch.Tensor ):
__a =torch.cat(_snake_case , dim=0 )
return mask
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
def __init__( self , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self , __snake_case , __snake_case , __snake_case = 250 , __snake_case = 0.0 , __snake_case = 10 , __snake_case = 10 , __snake_case = None , __snake_case = "pil" , __snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__a =image
__a =_preprocess_image(__snake_case )
__a =original_image.to(device=self.device , dtype=self.unet.dtype )
__a =_preprocess_mask(__snake_case )
__a =mask_image.to(device=self.device , dtype=self.unet.dtype )
__a =original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__a =original_image.shape
__a =randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__snake_case , __snake_case , __snake_case , self.device )
__a =eta
__a =self.scheduler.timesteps[0] + 1
__a =generator[0] if isinstance(__snake_case , __snake_case ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__a =self.unet(__snake_case , __snake_case ).sample
# compute previous image: x_t -> x_t-1
__a =self.scheduler.step(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__a =self.scheduler.undo_step(__snake_case , __snake_case , __snake_case )
__a =t
__a =(image / 2 + 0.5).clamp(0 , 1 )
__a =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 218 | 1 |
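The inpainting logic the pipeline above delegates to its scheduler boils down to one merge per step: re-noise the original image to the current timestep and keep it wherever the mask is 1, so only the masked-out region is actually generated. A minimal sketch (names and the `alpha_prod_t` schedule value are illustrative, not the diffusers API):

import torch

def repaint_merge(x_unknown: torch.Tensor, original: torch.Tensor,
                  mask: torch.Tensor, alpha_prod_t: torch.Tensor) -> torch.Tensor:
    # forward-diffuse the clean original to the current noise level
    noise = torch.randn_like(original)
    x_known = alpha_prod_t.sqrt() * original + (1 - alpha_prod_t).sqrt() * noise
    # mask == 1 keeps known pixels, mask == 0 lets the model's sample through
    return mask * x_known + (1 - mask) * x_unknown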
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''deta'''
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _a=None , _a=900 , _a=2048 , _a=6 , _a=2048 , _a=8 , _a=6 , _a=1024 , _a=8 , _a=0.0 , _a=True , _a="relu" , _a=256 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.0_2 , _a=1.0 , _a=True , _a=False , _a="sine" , _a=5 , _a=4 , _a=4 , _a=True , _a=300 , _a=True , _a=True , _a=1 , _a=5 , _a=2 , _a=1 , _a=1 , _a=5 , _a=2 , _a=0.1 , _a=0.2_5 , **_a , ) -> List[Any]:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(_a , _a ):
lowerCAmelCase_ = backbone_config.pop("model_type" )
lowerCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ = config_class.from_dict(_a )
lowerCAmelCase_ = backbone_config
lowerCAmelCase_ = num_queries
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = d_model
lowerCAmelCase_ = encoder_ffn_dim
lowerCAmelCase_ = encoder_layers
lowerCAmelCase_ = encoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = init_std
lowerCAmelCase_ = init_xavier_std
lowerCAmelCase_ = encoder_layerdrop
lowerCAmelCase_ = auxiliary_loss
lowerCAmelCase_ = position_embedding_type
# deformable attributes
lowerCAmelCase_ = num_feature_levels
lowerCAmelCase_ = encoder_n_points
lowerCAmelCase_ = decoder_n_points
lowerCAmelCase_ = two_stage
lowerCAmelCase_ = two_stage_num_proposals
lowerCAmelCase_ = with_box_refine
lowerCAmelCase_ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCAmelCase_ = class_cost
lowerCAmelCase_ = bbox_cost
lowerCAmelCase_ = giou_cost
# Loss coefficients
lowerCAmelCase_ = mask_loss_coefficient
lowerCAmelCase_ = dice_loss_coefficient
lowerCAmelCase_ = bbox_loss_coefficient
lowerCAmelCase_ = giou_loss_coefficient
lowerCAmelCase_ = eos_coefficient
lowerCAmelCase_ = focal_alpha
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __a ( self ) -> int:
return self.encoder_attention_heads
@property
def __a ( self ) -> int:
return self.d_model
def __a ( self ) -> Tuple:
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ = self.backbone_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
| 365 |
import math
def A(__a: int ):
    return math.sqrt(__a ) * math.sqrt(__a ) == __a
def A(__a: int ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = n
while left <= right:
lowerCAmelCase_ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase_ = mid - 1
else:
lowerCAmelCase_ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
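The perfect-square sample above lost some variable names to the dump; a readable version of its two checks (function names here are hypothetical), using math.isqrt for the exact integer test and the same binary search:

import math

def is_perfect_square(num: int) -> bool:
    # math.isqrt avoids the float rounding that makes sqrt(x) * sqrt(x) == x unreliable
    root = math.isqrt(num)
    return root * root == num

def is_perfect_square_binary_search(n: int) -> bool:
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            right = mid - 1
        else:
            left = mid + 1
    return False

assert is_perfect_square(36) and is_perfect_square_binary_search(36)
assert not is_perfect_square(35) and not is_perfect_square_binary_search(35)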
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (__lowerCamelCase ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 309 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : int
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase_ = namedtuple('CoinsDistribResult', 'moves excess')
def lowerCamelCase__ ( A__ : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(A__ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(A__ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(A__ ) != count_coins(A__ ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(A__ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowerCamelCase, __lowerCamelCase = get_distrib(node.left )
__lowerCamelCase, __lowerCamelCase = get_distrib(node.right )
__lowerCamelCase = 1 - left_distrib_excess
__lowerCamelCase = 1 - right_distrib_excess
__lowerCamelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(A__ )
+ abs(A__ )
)
__lowerCamelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(A__ , A__ )
return get_distrib(A__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
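The routine above is the classic "distribute coins in a binary tree" recursion; an equivalent formulation where each subtree reports its coin excess directly (a sketch with hypothetical names):

from __future__ import annotations
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None

def min_moves(node: Node | None) -> tuple[int, int]:
    # Returns (moves, excess) where excess = coins - nodes in this subtree;
    # every unit of excess crossing an edge costs exactly one move.
    if node is None:
        return 0, 0
    left_moves, left_excess = min_moves(node.left)
    right_moves, right_excess = min_moves(node.right)
    moves = left_moves + right_moves + abs(left_excess) + abs(right_excess)
    return moves, node.data - 1 + left_excess + right_excess

assert min_moves(Node(3, Node(0), Node(0)))[0] == 2  # move one coin to each leaf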
import sys
def lowerCAmelCase_ ( __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = len(__A )
UpperCAmelCase__ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase__ = [[0 for x in range(__A )] for x in range(__A )]
for chain_length in range(2, __A ):
for a in range(1, n - chain_length + 1 ):
UpperCAmelCase__ = a + chain_length - 1
UpperCAmelCase__ = sys.maxsize
for c in range(__A, __A ):
UpperCAmelCase__ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase__ = cost
UpperCAmelCase__ = c
return matrix, sol
def lowerCAmelCase_ ( __A, __A, __A ) -> Any:
'''simple docstring'''
if i == j:
print("A" + str(__A ), end=" " )
else:
print("(", end=" " )
print_optiomal_solution(__A, __A, optimal_solution[i][j] )
print_optiomal_solution(__A, optimal_solution[i][j] + 1, __A )
print(")", end=" " )
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase__ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase__ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase__ , UpperCAmelCase__ = matrix_chain_order(__A )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(__A, 1, n - 1 )
if __name__ == "__main__":
main()
| 143 |
from __future__ import annotations
def lowerCAmelCase_ ( __A ) -> list[int]:
'''simple docstring'''
if len(__A ) == 0:
return array
UpperCAmelCase__ , UpperCAmelCase__ = min(__A ), max(__A )
# Compute the variables
UpperCAmelCase__ = _max - _min + 1
UpperCAmelCase__ , UpperCAmelCase__ = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
UpperCAmelCase__ = i - _min
UpperCAmelCase__ = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
UpperCAmelCase__ = 0
for i in range(__A ):
while holes_repeat[i] > 0:
UpperCAmelCase__ = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = input('Enter numbers separated by comma:\n')
UpperCamelCase__ = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 143 | 1 |
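The sort above lost several variable names to the dump; a readable sketch of the same pigeonhole sort, matching the `pigeon_sort` entry point its driver code calls:

def pigeon_sort(array: list[int]) -> list[int]:
    if not array:
        return array
    # one hole per value between the minimum and maximum
    lo, hi = min(array), max(array)
    counts = [0] * (hi - lo + 1)
    for value in array:
        counts[value - lo] += 1
    # write values back in ascending order
    index = 0
    for offset, count in enumerate(counts):
        for _ in range(count):
            array[index] = offset + lo
            index += 1
    return array

assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]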
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
_lowerCAmelCase : List[Any] = os.path.abspath(_lowerCamelCase )
logger.info(F"Loading PyTorch weights from {pt_path}" )
_lowerCAmelCase : str = torch.load(_lowerCamelCase , map_location="cpu" )
logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
_lowerCAmelCase : List[Any] = convert_pytorch_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_lowerCAmelCase : Any = convert_pytorch_sharded_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
return flax_state_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_lowerCamelCase ) -> bool:
return len(set(_lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_lowerCAmelCase : Dict = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_lowerCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_lowerCAmelCase : Any = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_lowerCAmelCase : List[str] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
_lowerCAmelCase : Any = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_lowerCAmelCase : Any = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
_lowerCAmelCase : Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_lowerCAmelCase : Dict = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_lowerCAmelCase : Dict = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_lowerCAmelCase : Tuple = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_lowerCAmelCase : Any = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_lowerCAmelCase : Any = pt_tuple_key[-2] + "_v"
if name is not None:
_lowerCAmelCase : List[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
_lowerCAmelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_lowerCAmelCase : List[Any] = flax_model.params["params"]
else:
_lowerCAmelCase : Any = flax_model.params
_lowerCAmelCase : Union[str, Any] = flatten_dict(_lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_lowerCAmelCase : int = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_lowerCamelCase )
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
_lowerCAmelCase : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowerCAmelCase : List[str] = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
_lowerCAmelCase : Any = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
_lowerCAmelCase , _lowerCAmelCase : List[Any] = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
_lowerCAmelCase : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : Optional[int] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_lowerCAmelCase : Union[str, Any] = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Optional[Any] = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Any = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
import torch
# Load the index
_lowerCAmelCase : int = {}
for shard_file in shard_filenames:
# load using msgpack utils
_lowerCAmelCase : Union[str, Any] = torch.load(_lowerCamelCase )
_lowerCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
_lowerCAmelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_lowerCAmelCase : Optional[int] = flax_model.params["params"]
_lowerCAmelCase : Dict = flatten_dict(_lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
_lowerCAmelCase : str = flax_model.params
_lowerCAmelCase : Tuple = flatten_dict(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
_lowerCAmelCase : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowerCAmelCase : Dict = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
_lowerCAmelCase : Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
_lowerCAmelCase , _lowerCAmelCase : List[str] = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
_lowerCAmelCase : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_lowerCAmelCase : Any = jnp.asarray(_lowerCamelCase )
continue
if "var" in flax_key[-1]:
_lowerCAmelCase : Tuple = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Optional[int] = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Tuple = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = os.path.abspath(_lowerCamelCase )
logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
_lowerCAmelCase : Dict = getattr(_lowerCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCamelCase , "rb" ) as state_f:
try:
_lowerCAmelCase : Optional[Any] = from_bytes(_lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    _lowerCAmelCase : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda _lowerCamelCase : _lowerCamelCase.dtype == jnp.bfloataa , _lowerCamelCase ) ).values()
if any(_lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
_lowerCAmelCase : Tuple = jax.tree_util.tree_map(
            lambda _lowerCamelCase : _lowerCamelCase.astype(np.floataa ) if _lowerCamelCase.dtype == jnp.bfloataa else _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = flatten_dict(_lowerCamelCase )
_lowerCAmelCase : Any = pt_model.state_dict()
_lowerCAmelCase : Tuple = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
_lowerCAmelCase : Optional[Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Optional[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_lowerCAmelCase : int = flax_key_tuple[0] == pt_model.base_model_prefix
_lowerCAmelCase : List[str] = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : Dict = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : int = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCamelCase ) not in pt_model_dict:
# conv layer
_lowerCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCAmelCase : List[Any] = jnp.transpose(_lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ) not in pt_model_dict:
# linear layer
_lowerCAmelCase : Any = flax_key_tuple[:-1] + ("weight",)
_lowerCAmelCase : Any = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCAmelCase : Optional[int] = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_lowerCAmelCase : Union[str, Any] = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
_lowerCAmelCase : Dict = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
_lowerCAmelCase : str = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_lowerCAmelCase : Optional[int] = ".".join(_lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_lowerCAmelCase : Dict = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_lowerCAmelCase : Dict = key.split("." )
_lowerCAmelCase : List[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_lowerCAmelCase : List[Any] = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
_lowerCAmelCase : str = key_components[-2] + "_v"
if name is not None:
_lowerCAmelCase : Any = key_components[:-3] + [name]
_lowerCAmelCase : Union[str, Any] = ".".join(_lowerCamelCase )
_lowerCAmelCase : Dict = key
if flax_key in special_pt_names:
_lowerCAmelCase : Optional[int] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
_lowerCAmelCase : str = np.asarray(_lowerCamelCase ) if not isinstance(_lowerCamelCase , np.ndarray ) else flax_tensor
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(_lowerCamelCase )
# remove from missing keys
missing_keys.remove(_lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCamelCase )
pt_model.load_state_dict(_lowerCamelCase )
# re-transform missing_keys to list
_lowerCAmelCase : Dict = list(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(_lowerCamelCase ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"If your task is similar to the task the model of the checkpoint was trained on, "
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
| 36 |
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = defaultdict(_lowerCamelCase )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : str = []
for duplicate_key in duplicates:
_lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
_lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : List[str] = content[api_idx]["sections"]
# Then to the model doc
_lowerCAmelCase : Union[str, Any] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase : Optional[Any] = api_doc[scheduler_idx]["sections"]
_lowerCAmelCase : Optional[Any] = clean_doc_toc(_lowerCamelCase )
_lowerCAmelCase : int = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase : List[Any] = True
if overwrite:
_lowerCAmelCase : Dict = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase : Tuple = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
            raise ValueError(
                "The scheduler doc part of the table of contents is not properly sorted; run `make style` to fix this." )
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : int = content[api_idx]["sections"]
# Then to the model doc
_lowerCAmelCase : List[str] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = api_doc[pipeline_idx]["sections"]
_lowerCAmelCase : Tuple = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase : List[Any] = pipeline_doc["section"]
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
if overwrite:
_lowerCAmelCase : Optional[Any] = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase : Dict = True
if overwrite:
_lowerCAmelCase : Optional[int] = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase : Optional[int] = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
            raise ValueError(
                "The pipeline doc part of the table of contents is not properly sorted; run `make style` to fix this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 36 | 1 |
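The two most mechanical rules in the weight converter above, shown in isolation: dense and convolution weights change both name and memory layout between PyTorch and Flax. A sketch under those assumptions (key layout and helper name are illustrative):

import numpy as np

def pt_param_to_flax(key: tuple, tensor: np.ndarray):
    # Conv: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out), "weight" -> "kernel"
    if key[-1] == "weight" and tensor.ndim == 4:
        return key[:-1] + ("kernel",), tensor.transpose(2, 3, 1, 0)
    # Linear: PyTorch (out, in) -> Flax (in, out), "weight" -> "kernel"
    if key[-1] == "weight" and tensor.ndim == 2:
        return key[:-1] + ("kernel",), tensor.T
    return key, tensor

key, w = pt_param_to_flax(("encoder", "dense", "weight"), np.zeros((8, 4)))
print(key, w.shape)  # ('encoder', 'dense', 'kernel') (4, 8)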
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], ) -> List[str]:
UpperCamelCase : Optional[int] = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase : List[Any] = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : int = num_channels
UpperCamelCase : int = image_size
UpperCamelCase : List[Any] = min_resolution
UpperCamelCase : int = max_resolution
UpperCamelCase : Any = do_resize
UpperCamelCase : Optional[int] = size
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : Optional[Any] = image_mean
UpperCamelCase : Tuple = image_std
def snake_case_ ( self ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = ViTImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = EfficientFormerImageProcessorTester(self )
@property
def snake_case_ ( self ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> int:
# Initialize image_processor
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : List[str] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image )
# Test not batched input
UpperCamelCase : str = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
# Test batched
UpperCamelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
def snake_case_ ( self ) -> str:
# Initialize image_processor
UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, np.ndarray )
# Test not batched input
UpperCamelCase : Dict = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
# Test batched
UpperCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
def snake_case_ ( self ) -> Tuple:
# Initialize image_processor
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : int = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, torch.Tensor )
# Test not batched input
UpperCamelCase : Optional[int] = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
# Test batched
UpperCamelCase : int = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
| 103 | 1 |
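The `_LazyModule` pattern used in the FocalNet sample above defers heavy submodule imports until an attribute is first touched; a minimal standard-library sketch of the same idea (not the transformers implementation, and it assumes the module lives inside an importable package):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the owning submodule only now, on first access
        submodule = importlib.import_module(
            f".{self._attr_to_submodule[attr]}", self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value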