code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import math
def snake_case ( A__ = 1_00 ):
UpperCAmelCase_ : int = sum(i * i for i in range(1 ,n + 1 ) )
UpperCAmelCase_ : Union[str, Any] = int(math.pow(sum(range(1 ,n + 1 ) ) ,2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 268 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (__A ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = proj_size
UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ )
UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output
UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase_ (nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Optional[Any] = config.hidden_size
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ )
] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str:
for block in self.blocks:
UpperCAmelCase_ : int = block(lowerCAmelCase_ )
return hidden_states
| 268 | 1 |
'''simple docstring'''
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> int:
UpperCamelCase = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
UpperCamelCase = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def lowercase__ ( __UpperCamelCase )-> int:
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def lowercase__ ( __UpperCamelCase )-> int:
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
UpperCamelCase = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowercase__ ( __UpperCamelCase )-> int:
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
f'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 183 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE__ = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def lowercase__ ( __UpperCamelCase )-> Any:
with open(__UpperCamelCase , """r""" ) as f:
UpperCamelCase = f.read().splitlines()
return [l.strip() for l in lines]
class a_ ( lowerCamelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<eos>" , **_SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = load_vocab_file(_SCREAMING_SNAKE_CASE )
UpperCamelCase = dict(enumerate(self.all_tokens ) )
UpperCamelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCamelCase = unk_token
UpperCamelCase = cls_token
UpperCamelCase = pad_token
UpperCamelCase = mask_token
UpperCamelCase = eos_token
UpperCamelCase = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) )
def A__ ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return text.split()
def A__ ( self , _SCREAMING_SNAKE_CASE=False ) -> Dict:
"""simple docstring"""
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCamelCase = [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
if token_ids_a is not None:
mask += [0] * len(_SCREAMING_SNAKE_CASE ) + [1]
return mask
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write("""\n""".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> int:
"""simple docstring"""
return super()._add_tokens(_SCREAMING_SNAKE_CASE , special_tokens=_SCREAMING_SNAKE_CASE )
| 183 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 65 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCamelCase__ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ = [0, 2_5, 5_0]
UpperCamelCase__ = [2_5, 5_0, 7_5]
UpperCamelCase__ = fuzz.membership.trimf(X, abca)
UpperCamelCase__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ = np.ones(7_5)
UpperCamelCase__ = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCamelCase__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCamelCase__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCamelCase__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
UpperCamelCase__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
UpperCamelCase__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 65 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
| 344 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : int = 50_00_00_00 ) -> int:
_lowerCAmelCase : Optional[int] = set()
_lowerCAmelCase : Optional[int] = int((limit - 24) ** (1 / 2) )
_lowerCAmelCase : Optional[Any] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , a_ ) ) )
for primea in primes:
_lowerCAmelCase : List[Any] = primea * primea
for primea in primes:
_lowerCAmelCase : Dict = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
_lowerCAmelCase : Tuple = primea * primea * primea * primea
_lowerCAmelCase : Optional[int] = square + cube + tetr
if total >= limit:
break
ret.add(a_ )
return len(a_ )
if __name__ == "__main__":
print(F'{solution() = }')
| 309 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def __lowerCamelCase ( a_ : str , a_ : Dict , a_ : Any , a_ : str ) -> str:
__SCREAMING_SNAKE_CASE :int = s.rsplit(a_ , a_ )
return new.join(a_ )
def __lowerCamelCase ( a_ : List[str] ) -> Dict:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __lowerCamelCase ( a_ : Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE :Optional[int] = {}
__SCREAMING_SNAKE_CASE :Union[str, Any] = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__SCREAMING_SNAKE_CASE :Optional[Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
__SCREAMING_SNAKE_CASE :str = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
__SCREAMING_SNAKE_CASE :List[Any] = rreplace(a_ , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
__SCREAMING_SNAKE_CASE :List[Any] = rreplace(a_ , '''.b''' , '''.bias''' , 1 )
__SCREAMING_SNAKE_CASE :Optional[Any] = value.float()
return upgrade
@torch.no_grad()
def __lowerCamelCase ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int]=None , a_ : Dict=True ) -> Union[str, Any]:
from dall_e import Encoder
__SCREAMING_SNAKE_CASE :int = Encoder()
if os.path.exists(a_ ):
__SCREAMING_SNAKE_CASE :Dict = torch.load(a_ )
else:
__SCREAMING_SNAKE_CASE :List[str] = torch.hub.load_state_dict_from_url(a_ )
if isinstance(a_ , a_ ):
__SCREAMING_SNAKE_CASE :List[str] = ckpt.state_dict()
encoder.load_state_dict(a_ )
if config_path is not None:
__SCREAMING_SNAKE_CASE :Any = FlavaImageCodebookConfig.from_pretrained(a_ )
else:
__SCREAMING_SNAKE_CASE :Optional[int] = FlavaImageCodebookConfig()
__SCREAMING_SNAKE_CASE :Tuple = FlavaImageCodebook(a_ ).eval()
__SCREAMING_SNAKE_CASE :List[str] = encoder.state_dict()
__SCREAMING_SNAKE_CASE :Union[str, Any] = upgrade_state_dict(a_ )
hf_model.load_state_dict(a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = hf_model.state_dict()
__SCREAMING_SNAKE_CASE :Union[str, Any] = count_parameters(a_ )
__SCREAMING_SNAKE_CASE :Any = count_parameters(a_ )
assert torch.allclose(a_ , a_ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(a_ )
else:
return hf_state_dict
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowerCamelCase_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 191 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] ="""fnet"""
def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any]=3_20_00 , UpperCamelCase : Any=7_68 , UpperCamelCase : Tuple=12 , UpperCamelCase : Union[str, Any]=30_72 , UpperCamelCase : int="gelu_new" , UpperCamelCase : str=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=4 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Union[str, Any]=1e-1_2 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=5_12 , UpperCamelCase : Any=3 , UpperCamelCase : str=1 , UpperCamelCase : Optional[Any]=2 , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Optional[int] = vocab_size
_snake_case : int = max_position_embeddings
_snake_case : Dict = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : List[str] = intermediate_size
_snake_case : Union[str, Any] = hidden_act
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : List[Any] = initializer_range
_snake_case : int = type_vocab_size
_snake_case : Union[str, Any] = layer_norm_eps
_snake_case : str = use_tpu_fourier_optimizations
_snake_case : Tuple = tpu_short_seq_length
| 260 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , *UpperCamelCase : int , **UpperCamelCase : Dict ):
'''simple docstring'''
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 260 | 1 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> str:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
snake_case_ = os.path.abspath(UpperCAmelCase )
logger.info(f'Loading PyTorch weights from {pt_path}' )
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
snake_case_ = convert_pytorch_state_dict_to_flax(UpperCAmelCase , UpperCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
snake_case_ = convert_pytorch_sharded_state_dict_to_flax(UpperCAmelCase , UpperCAmelCase )
return flax_state_dict
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(UpperCAmelCase ) -> bool:
return len(set(UpperCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
snake_case_ = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
snake_case_ = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
snake_case_ = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
snake_case_ = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case_ = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(UpperCAmelCase ):
snake_case_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case_ = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(UpperCAmelCase ):
snake_case_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case_ = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case_ = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
snake_case_ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
snake_case_ = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
snake_case_ = pt_tuple_key[-2] + '_v'
if name is not None:
snake_case_ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
# convert pytorch tensor to numpy
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
snake_case_ = flax_model.params['params']
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(UpperCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(UpperCAmelCase )
snake_case_ = {}
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ , snake_case_ = rename_key_and_reshape_tensor(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
snake_case_ = jnp.asarray(UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCAmelCase , UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(UpperCAmelCase )
return unflatten_dict(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
import torch
# Load the index
snake_case_ = {}
for shard_file in shard_filenames:
# load using msgpack utils
snake_case_ = torch.load(UpperCAmelCase )
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flax_model.params['params']
snake_case_ = flatten_dict(UpperCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(UpperCAmelCase )
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ , snake_case_ = rename_key_and_reshape_tensor(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case_ = jnp.asarray(UpperCAmelCase )
continue
if "var" in flax_key[-1]:
snake_case_ = jnp.asarray(UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCAmelCase , UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(UpperCAmelCase )
return unflatten_dict(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = os.path.abspath(UpperCAmelCase )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
snake_case_ = getattr(UpperCAmelCase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(UpperCAmelCase , 'rb' ) as state_f:
try:
snake_case_ = from_bytes(UpperCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
snake_case_ = flatten_dict(jax.tree_util.tree_map(lambda UpperCAmelCase : x.dtype == jnp.bfloataa , UpperCAmelCase ) ).values()
if any(UpperCAmelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
snake_case_ = jax.tree_util.tree_map(
lambda UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCAmelCase )
snake_case_ = flatten_dict(UpperCAmelCase )
snake_case_ = pt_model.state_dict()
snake_case_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
snake_case_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
snake_case_ = []
snake_case_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
snake_case_ = flax_key_tuple[0] == pt_model.base_model_prefix
snake_case_ = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(UpperCAmelCase ) not in pt_model_dict:
# conv layer
snake_case_ = flax_key_tuple[:-1] + ('weight',)
snake_case_ = jnp.transpose(UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCAmelCase ) not in pt_model_dict:
# linear layer
snake_case_ = flax_key_tuple[:-1] + ('weight',)
snake_case_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case_ = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
snake_case_ = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
snake_case_ = '.'.join(UpperCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
snake_case_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
snake_case_ = key.split('.' )
snake_case_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
snake_case_ = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
snake_case_ = key_components[-2] + '_v'
if name is not None:
snake_case_ = key_components[:-3] + [name]
snake_case_ = '.'.join(UpperCAmelCase )
snake_case_ = key
if flax_key in special_pt_names:
snake_case_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
snake_case_ = np.asarray(UpperCAmelCase ) if not isinstance(UpperCAmelCase , np.ndarray ) else flax_tensor
snake_case_ = torch.from_numpy(UpperCAmelCase )
# remove from missing keys
missing_keys.remove(UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCAmelCase )
pt_model.load_state_dict(UpperCAmelCase )
# re-transform missing_keys to list
snake_case_ = list(UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(UpperCAmelCase ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 69 |
def a__ ( A_ ):
'''simple docstring'''
if not isinstance(A_, A_ ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(A_ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(A_ ) == 1:
return True
__magic_name__ = series[1] - series[0]
for index in range(len(A_ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def a__ ( A_ ):
'''simple docstring'''
if not isinstance(A_, A_ ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(A_ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
__magic_name__ = 0
for val in series:
answer += val
return answer / len(A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Any = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 351 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A__ : Tuple = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209 | 0 |
def lowercase( UpperCamelCase_ ) -> int:
'''simple docstring'''
UpperCamelCase = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowercase( UpperCamelCase_ ) -> int:
'''simple docstring'''
UpperCamelCase = 0
while number > 0:
UpperCamelCase = number % 10
sum_of_digits += last_digit
UpperCamelCase = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowercase( UpperCamelCase_ = 100 ) -> int:
'''simple docstring'''
UpperCamelCase = factorial(UpperCamelCase_ )
UpperCamelCase = split_and_add(UpperCamelCase_ )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 343 | from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
pass
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
def __iter__( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self
UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase_ )
yield node.data
UpperCamelCase = node.next_node
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Node(1)
_SCREAMING_SNAKE_CASE = Node(2)
_SCREAMING_SNAKE_CASE = Node(3)
_SCREAMING_SNAKE_CASE = Node(4)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = root_node.next_node
print(root_node.has_loop) # True
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = Node(1)
print(root_node.has_loop) # False
| 343 | 1 |
from __future__ import annotations
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(UpperCamelCase_ ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase_ = [90, 23, 6, 33, 21, 65, 123, 34423]
UpperCamelCase_ = math.log(len(UpperCamelCase_ ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 366 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = len(matrix[0] )
UpperCamelCase_ = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase_ = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row]
UpperCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
"""simple docstring"""
def lowercase ( _snake_case : str ) ->str:
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( _snake_case : float , _snake_case : int = 18 , _snake_case : int = 10 ) ->float:
"""simple docstring"""
__snake_case : Any = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__snake_case : int = radians(_snake_case )
__snake_case : str = angle_in_radians
__snake_case : Optional[int] = 3
__snake_case : List[Any] = -1
for _ in range(_snake_case ):
result += (b * (angle_in_radians**a)) / factorial(_snake_case )
__snake_case : int = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_snake_case , _snake_case )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 102 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        # unittest hook; `check=True` restores the boolean the original mangled away
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # named to match the `self.create_estimator()` call in the test below
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 368 |
'''simple docstring'''
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(TypeError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 249 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __A ( snake_case_ , snake_case_ ):
@register_to_config
def __init__(self : Tuple , __a : int = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , __a ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , __a ) )
def _lowercase (self : Dict , __a : Any = None , __a : List[str] = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(__a ).to(__a ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(__a ).to(__a ) )
return self
def _lowercase (self : Union[str, Any] , __a : Union[str, Any] ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _lowercase (self : int , __a : Optional[int] ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
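# Round-trip sanity check (our addition): with the default zero mean and unit
# std, `unscale` exactly inverts `scale` up to floating point error.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    x = torch.randn(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)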
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        return {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 261 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class A__(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PIL.Image.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 370 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents, image_embeddings=image_embeddings, ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy")

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(input_image, generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 213 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs) -> Dict:
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
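# Typical invocation via python-fire (our addition; the file names are
# placeholders, not from the original script):
#
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge.json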
| 75 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'{solution() = }')
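# Hand-checkable case (our addition): for perimeter 12 the only Pythagorean
# triplet is (3, 4, 5), so solution(12) == 3 * 4 * 5 == 60.
if __name__ == "__main__":
    assert solution(12) == 60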
| 41 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
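# Minimal usage sketch (our addition; the zero "score" is an arbitrary
# stand-in for a real score network's output):
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(10)
    x = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(x)  # a score model would be evaluated here
        x, x_mean = scheduler.step_pred(score, x, t)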
| 369 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects and the `TYPE_CHECKING` objects.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
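# Quick illustration (our addition) of what `find_backend` extracts from init lines:
#   find_backend("if not is_tokenizers_available():")  -> "tokenizers"
#   find_backend("_import_structure = {")              -> None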
| 314 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n) -> bool:
    """Return True if the digits of `n` are exactly 1-9, each used once."""
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution():
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
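# Why those multipliers (our note): concatenating n and 2n for a 4-digit n with
# a 5-digit double gives n * 100002, and concatenating n, 2n, 3n for a 3-digit
# n gives n * 1002003. Example: 192 * 1002003 == 192384576, the classic
# 9-pandigital concatenated product.
if __name__ == "__main__":
    assert is_9_pandigital(192384576)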
| 316 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
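# Geometry behind the counting step (our note): for a cuboid with sides
# (a, b, M) where a <= b <= M, the shortest surface path has length
# sqrt((a + b)**2 + M**2). When that is an integer, every split of
# s = a + b into 1 <= a <= b <= M is a distinct cuboid, giving
# min(M, s // 2) - max(1, s - M) + 1 solutions for that (M, s) pair.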
if __name__ == "__main__":
print(f"""{solution() = }""") | 163 | 0 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Largest numerator of the fraction just left of numerator/denominator with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
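# Small hand-checkable case (our addition): among fractions with denominator
# <= 8, the largest fraction strictly below 3/7 is 2/5, so the numerator is 2.
if __name__ == "__main__":
    assert solution(3, 7, 8) == 2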
| 254 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the issuer prefix of the card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the card number passes format and Luhn checks."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 254 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 2 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """
    Get the list of modules that are relatively imported in a module file.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
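# Illustration (our addition): a module containing the line
#   from .pipeline_utils import DiffusionPipeline
# yields ["pipeline_utils"] from `get_relative_imports`.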
def get_relative_import_files(module_file):
    """
    Get the list of all files that are needed for a given module, recursively following relative imports.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """
    Import a module on the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. There must be exactly one such class.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file ( pretrained_model_name_or_path , module_file , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , ):
    """simple docstring"""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f' {", ".join(available_versions + ["main"] )}.' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=False , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule  # HF_MODULES_CACHE comes from diffusers.utils
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , f'{module_needed}.py' , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module ( pretrained_model_name_or_path , module_file , class_name = None , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('''.py''' , '''''' ) )
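# A hypothetical usage sketch; the repo id and file name below are illustrative,
# not real hub artifacts:
#
#     pipeline_class = get_class_from_dynamic_module(
#         "some-user/my-community-pipeline",   # hub repo or local directory (assumed)
#         module_file="my_pipeline.py",
#         class_name=None,                     # None falls back to find_pipeline_class
#     )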
| 266 | 0 |
import mpmath # for roots of unity
import numpy as np
class lowercase__ :
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Iterative FFT: halve the number of columns on every pass
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        a = 'A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
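    # A short usage sketch: multiply A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x + 6x^2.
    # (The class keeps its obfuscated name here; upstream it is simply called FFT.)
    fft = lowercase__([1, 2, 3], [4, 5, 6])
    print(fft.product)  # coefficients of A*B: 4 + 13x + 28x^2 + 27x^3 + 18x^4
    print(fft)          # pretty-prints A, B and A*B via __str__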
| 169 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__snake_case = logging.getLogger(__name__)
class SummarizationModule ( BaseTransformer ):
    mode = """summarization"""
    loss_names = ["""loss"""]
    metric_names = ROUGE_KEYS
    default_val_metric = """rouge2"""
def __init__( self : List[str] , UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(UpperCAmelCase_ , num_labels=UpperCAmelCase_ , mode=self.mode , **UpperCAmelCase_ )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE__ = Path(self.output_dir ) / 'metrics.json'
SCREAMING_SNAKE_CASE__ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = defaultdict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.config.model_type
SCREAMING_SNAKE_CASE__ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
SCREAMING_SNAKE_CASE__ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE__ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE__ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE__ = get_git_info()['repo_sha']
SCREAMING_SNAKE_CASE__ = hparams.num_workers
SCREAMING_SNAKE_CASE__ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE__ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE__ = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE__ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE__ = self.model.config.max_length
SCREAMING_SNAKE_CASE__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A_ ( self : List[str] , UpperCAmelCase_ : Dict[str, torch.Tensor] ):
SCREAMING_SNAKE_CASE__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCAmelCase_ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
SCREAMING_SNAKE_CASE__ = True
return readable_batch
def A_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
return self.model(UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : List[int] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return lmap(str.strip , UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : dict ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch['input_ids'], batch['attention_mask']
SCREAMING_SNAKE_CASE__ = batch['labels']
if isinstance(self.model , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = self.model._shift_right(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = shift_tokens_right(UpperCAmelCase_ , UpperCAmelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE__ = decoder_input_ids
self.save_readable_batch(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE__ = nn.CrossEntropyLoss(ignore_index=UpperCAmelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_smoothed_nll_loss(
UpperCAmelCase_ , UpperCAmelCase_ , self.hparams.label_smoothing , ignore_index=UpperCAmelCase_ )
return (loss,)
@property
def A_ ( self : Dict ):
return self.tokenizer.pad_token_id
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
# tokens per batch
SCREAMING_SNAKE_CASE__ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A_ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple="val" ):
self.step_count += 1
SCREAMING_SNAKE_CASE__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE__ = losses['loss']
SCREAMING_SNAKE_CASE__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
SCREAMING_SNAKE_CASE__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE__ = torch.tensor(UpperCAmelCase_ ).type_as(UpperCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE__ = self.step_count
self.metrics[prefix].append(UpperCAmelCase_ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE__ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
    def calc_generative_metrics( self , preds , target ) -> dict:
        return calculate_rouge(preds , target )
def A_ ( self : str , UpperCAmelCase_ : dict ):
        ta = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE__ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=UpperCAmelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE__ = (time.time() - ta) / batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(batch['labels'] )
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = self.calc_generative_metrics(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = np.mean(lmap(UpperCAmelCase_ , UpperCAmelCase_ ) )
base_metrics.update(gen_time=UpperCAmelCase_ , gen_len=UpperCAmelCase_ , preds=UpperCAmelCase_ , target=UpperCAmelCase_ , **UpperCAmelCase_ )
return base_metrics
def A_ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Any , UpperCAmelCase_ : List[str] ):
return self.validation_epoch_end(UpperCAmelCase_ , prefix='test' )
def A_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE__ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE__ = self.dataset_class(
self.tokenizer , type_path=UpperCAmelCase_ , n_obs=UpperCAmelCase_ , max_target_length=UpperCAmelCase_ , **self.dataset_kwargs , )
return dataset
def A_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = self.get_dataset(UpperCAmelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_sortish_sampler(UpperCAmelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_sampler=UpperCAmelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase_ )
return dataloader
def A_ ( self : str ):
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def A_ ( self : int ):
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
parser.add_argument(
'--max_source_length' , default=1024 , type=UpperCAmelCase_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
        parser.add_argument(
            '--max_target_length' , default=56 , type=int , help=(
                'The maximum total output sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=142 , type=int , help=(
                'The maximum total output sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=142 , type=int , help=(
                'The maximum total output sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=UpperCAmelCase_ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=UpperCAmelCase_ )
parser.add_argument('--max_tokens_per_batch' , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument('--logger_name' , type=UpperCAmelCase_ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=UpperCAmelCase_ , default=500 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=str , default='summarization' , required=False , help='Task to fine-tune on: summarization or translation.' )
parser.add_argument('--label_smoothing' , type=UpperCAmelCase_ , default=0.0 , required=UpperCAmelCase_ )
parser.add_argument('--src_lang' , type=UpperCAmelCase_ , default='' , required=UpperCAmelCase_ )
parser.add_argument('--tgt_lang' , type=UpperCAmelCase_ , default='' , required=UpperCAmelCase_ )
parser.add_argument('--eval_beams' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ )
parser.add_argument(
'--val_metric' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=UpperCAmelCase_ , default=1 , required=UpperCAmelCase_ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will affect it.'
) , )
return parser
class TranslationModule ( SummarizationModule ):
    mode = """translation"""
    loss_names = ["""loss"""]
    metric_names = ["""bleu"""]
    default_val_metric = """bleu"""
    def __init__( self , hparams , **kwargs ):
        super().__init__(hparams , **kwargs )
        self.dataset_kwargs['''src_lang'''] = hparams.src_lang
        self.dataset_kwargs['''tgt_lang'''] = hparams.tgt_lang
    def calc_generative_metrics( self , preds , target ):
        return calculate_bleu(preds , target )
def main( args , model=None ) -> SummarizationModule:
    '''simple docstring'''
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('WANDB_PROJECT' , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == 'loss'
    trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
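# A hypothetical invocation sketch. --model_name_or_path / --do_train / --do_predict /
# --gpus / --data_dir / --output_dir are assumed to come from the generic
# lightning_base arguments; the rest are defined by add_model_specific_args above.
# Paths and the model id are illustrative.
#
#   python finetune.py \
#     --model_name_or_path t5-small --data_dir ./cnn_dm --output_dir ./out \
#     --task summarization --gpus 1 --do_train --do_predict \
#     --max_source_length 1024 --max_target_length 56 --val_metric rouge2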
| 169 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a : int = 0
    b : bool = False
    c : float = 3.0
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[str] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 239 | '''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    # Rounding guards against floating-point noise in the cross product.
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
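

if __name__ == "__main__":
    # Example: three points on the line x = y = z are collinear; a triangle is not.
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False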
| 239 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
    '''simple docstring'''
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( input_text , options=[] , convert_value=None , default=0 ):
    '''simple docstring'''
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    '''simple docstring'''
    value = int(value )
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _convert_distributed_mode ( value ):
    '''simple docstring'''
    value = int(value )
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _convert_dynamo_backend ( value ):
    '''simple docstring'''
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    '''simple docstring'''
    value = int(value )
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _convert_sagemaker_distributed_mode ( value ):
    '''simple docstring'''
    value = int(value )
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _convert_yes_no_to_bool ( value ):
    '''simple docstring'''
    return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter):
"""simple docstring"""
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 326 |
"""simple docstring"""
def gray_code_sequence ( bit_count : int ):
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count : int ):
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
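    # Example: the 3-bit Gray code; consecutive values differ in exactly one bit.
    print(gray_code_sequence(3))  # [0, 1, 3, 2, 6, 7, 5, 4]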
| 326 | 1 |
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 142 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 142 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
A = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer ( BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer ( BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
          lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
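# A hedged usage sketch of the reader-tokenizer call described above; the strings are
# illustrative, and `tokenizer`/`model` are assumed DPRReader instances:
#
#     encoded = tokenizer(
#         questions="What does a DPR reader score?",
#         titles=["Dense Passage Retrieval", "DPR"],
#         texts=["DPR scores passage relevance ...", "... and answer spans."],
#         padding=True,
#         return_tensors="pt",
#     )
#     outputs = model(**encoded)
#     best_spans = tokenizer.decode_best_spans(encoded, outputs)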
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin :
'''simple docstring'''
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f"""There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.""" )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer ( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 364 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowercase :
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( *_UpperCAmelCase , **_UpperCAmelCase ):
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _lowerCamelCase ( self ):
pass
@slow
@require_torch
def _lowerCamelCase ( self ):
        image_segmenter = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def _lowerCamelCase ( self ):
__a : Dict = '''facebook/sam-vit-huge'''
__a : Tuple = pipeline('''mask-generation''' , model=_UpperCAmelCase )
__a : List[Any] = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__a : Optional[int] = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
] , ) | 188 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class __snake_case :
def __init__( self : str , A_ : int):
lowerCAmelCase_ : Optional[int] = str(id_)
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Any = {} # {vertex:distance}
def __lt__( self : Tuple , A_ : Optional[int]):
return self.key < other.key
def __repr__( self : List[Any]):
return self.id
def UpperCAmelCase__ ( self : int , A_ : str):
self.neighbors.append(A_)
def UpperCAmelCase__ ( self : Dict , A_ : List[Any] , A_ : Tuple):
lowerCAmelCase_ : List[Any] = weight
def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] ,__UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] ,__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : list ,__UpperCamelCase : Vertex ):
lowerCAmelCase_ : int = []
for u in graph:
lowerCAmelCase_ : Union[str, Any] = math.inf
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Tuple = graph[:]
while q:
lowerCAmelCase_ : Any = min(__UpperCamelCase )
q.remove(__UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowerCAmelCase_ : Optional[Any] = u
lowerCAmelCase_ : Any = u.edges[v.id]
for i in range(1 ,len(__UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCamelCase( __UpperCamelCase : list ,__UpperCamelCase : Vertex ):
for u in graph:
lowerCAmelCase_ : List[Any] = math.inf
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : List[Any] = list(__UpperCamelCase )
hq.heapify(__UpperCamelCase )
while h:
lowerCAmelCase_ : List[str] = hq.heappop(__UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowerCAmelCase_ : Dict = u
lowerCAmelCase_ : List[Any] = u.edges[v.id]
hq.heapify(__UpperCamelCase )
for i in range(1 ,len(__UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase( ):
lowerCAmelCase_ : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' ,type=__UpperCamelCase ,default=1 ,help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' ,type=__UpperCamelCase ,help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) ,)
# rest from the training program
parser.add_argument('''training_script_args''' ,nargs=__UpperCamelCase )
return parser.parse_args()
def UpperCamelCase( ):
lowerCAmelCase_ : str = parse_args()
# Import training_script as a module.
lowerCAmelCase_ : str = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCAmelCase_ : Tuple = script_fpath.stem
lowerCAmelCase_ : Union[str, Any] = importlib.import_module(__UpperCamelCase )
# Patch sys.argv
lowerCAmelCase_ : Optional[int] = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 103 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (IPNDMScheduler,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = (("""num_inference_steps""", 50),)
def lowercase_ ( self : Optional[int] , **UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = {"num_train_timesteps": 1_0_0_0}
config.update(**UpperCamelCase__)
return config
def lowercase_ ( self : int , UpperCamelCase__ : Optional[Any]=0 , **UpperCamelCase__ : int)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: List[Any] = dict(self.forward_default_kwargs)
__lowerCAmelCase: Dict = kwargs.pop("num_inference_steps" , UpperCamelCase__)
__lowerCAmelCase: Dict = self.dummy_sample
__lowerCAmelCase: List[str] = 0.1 * sample
__lowerCAmelCase: int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
__lowerCAmelCase: Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
__lowerCAmelCase: Optional[Any] = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
__lowerCAmelCase: str = scheduler_class.from_pretrained(UpperCamelCase__)
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
__lowerCAmelCase: List[str] = dummy_past_residuals[:]
__lowerCAmelCase: Union[str, Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
__lowerCAmelCase: int = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
__lowerCAmelCase: List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
__lowerCAmelCase: Optional[int] = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
pass
def lowercase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : List[str])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = dict(self.forward_default_kwargs)
__lowerCAmelCase: Dict = kwargs.pop("num_inference_steps" , UpperCamelCase__)
__lowerCAmelCase: List[str] = self.dummy_sample
__lowerCAmelCase: Optional[Any] = 0.1 * sample
__lowerCAmelCase: Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: List[Any] = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: int = dummy_past_residuals[:]
if time_step is None:
__lowerCAmelCase: Optional[Any] = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
__lowerCAmelCase: int = scheduler_class.from_pretrained(UpperCamelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[:]
__lowerCAmelCase: Any = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
__lowerCAmelCase: int = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
__lowerCAmelCase: Tuple = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
__lowerCAmelCase: List[Any] = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self : Dict , **UpperCamelCase__ : Tuple)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.scheduler_classes[0]
__lowerCAmelCase: Optional[int] = self.get_scheduler_config(**UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = scheduler_class(**UpperCamelCase__)
__lowerCAmelCase: List[str] = 1_0
__lowerCAmelCase: Any = self.dummy_model()
__lowerCAmelCase: Any = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__)
for i, t in enumerate(scheduler.timesteps):
__lowerCAmelCase: List[Any] = model(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Optional[int] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
for i, t in enumerate(scheduler.timesteps):
__lowerCAmelCase: List[Any] = model(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: str = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
return sample
def lowercase_ ( self : List[str])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: str = dict(self.forward_default_kwargs)
__lowerCAmelCase: Optional[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase__)
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCamelCase__)
__lowerCAmelCase: Optional[int] = self.dummy_sample
__lowerCAmelCase: Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , "set_timesteps"):
scheduler.set_timesteps(UpperCamelCase__)
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , "set_timesteps"):
__lowerCAmelCase: Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCAmelCase: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__lowerCAmelCase: List[Any] = dummy_past_residuals[:]
__lowerCAmelCase: int = scheduler.timesteps[5]
__lowerCAmelCase: List[str] = scheduler.timesteps[6]
__lowerCAmelCase: str = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
__lowerCAmelCase: int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
__lowerCAmelCase: Union[str, Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
__lowerCAmelCase: Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)
def lowercase_ ( self : Any)-> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)
def lowercase_ ( self : Union[str, Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: str = self.full_loop()
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCamelCase__))
assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 364 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=1_8 , UpperCamelCase__ : List[Any]=3_0 , UpperCamelCase__ : List[str]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=[0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ : str=[0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ : List[str]=True , )-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase: Union[str, Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Union[str, Any] = num_channels
__lowerCAmelCase: Optional[Any] = image_size
__lowerCAmelCase: Tuple = min_resolution
__lowerCAmelCase: List[str] = max_resolution
__lowerCAmelCase: List[Any] = do_resize
__lowerCAmelCase: Union[str, Any] = size
__lowerCAmelCase: List[Any] = do_center_crop
__lowerCAmelCase: Optional[int] = crop_size
__lowerCAmelCase: Dict = do_normalize
__lowerCAmelCase: List[str] = image_mean
__lowerCAmelCase: Optional[int] = image_std
__lowerCAmelCase: str = do_convert_rgb
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase_ ( self : Any , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Dict=False)-> List[str]:
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__lowerCAmelCase: Optional[int] = []
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta))
else:
__lowerCAmelCase: List[str] = []
for i in range(self.batch_size):
__lowerCAmelCase , __lowerCAmelCase: List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__lowerCAmelCase: Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1)) for x in image_inputs]
if torchify:
__lowerCAmelCase: str = [torch.from_numpy(UpperCamelCase__) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase__)
@property
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase__ , "size"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
def lowercase_ ( self : List[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4})
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8})
__lowerCAmelCase: List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {"shortest_edge": 4_2})
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4})
def lowercase_ ( self : List[str])-> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self : Any)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: int = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCAmelCase: List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Any = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCAmelCase: Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
# Test not batched input
__lowerCAmelCase: Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = 3
@property
def lowercase_ ( self : Union[str, Any])-> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase__ , "size"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
pass
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: int = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 108 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def a_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
UpperCAmelCase__ = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCAmelCase__ = datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
UpperCAmelCase__ = f'''{dataset}-{pair}'''
UpperCAmelCase__ = Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCAmelCase__ = 'val' if split == 'validation' else split
UpperCAmelCase__ = save_dir.joinpath(f'''{fn}.source''' )
UpperCAmelCase__ = save_dir.joinpath(f'''{fn}.target''' )
UpperCAmelCase__ = src_path.open('w+' )
UpperCAmelCase__ = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCAmelCase__ = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 98 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_a = logging.get_logger(__name__)
_a = {"vocab_file": "spiece.model"}
_a = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_a = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
lowerCamelCase__ = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCamelCase__ = '''<|endoftext|>''' if eos_token is None else eos_token
lowerCamelCase__ = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCamelCase__ = unk_token if pad_token is None else pad_token
lowerCamelCase__ = eos_token if bos_token is None else bos_token
else:
lowerCamelCase__ = '''<pad>''' if pad_token is None else pad_token
lowerCamelCase__ = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = remove_space
lowerCamelCase__ = keep_accents
lowerCamelCase__ = vocab_file
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
# Used for whitespace normalization in input texts
# fmt : off
lowerCamelCase__ = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCamelCase__ = re.compile(
F'[{"".join(map(__lowerCAmelCase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self ):
'''simple docstring'''
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.non_printing_characters_re.sub('''''' , __lowerCAmelCase )
# Normalize whitespaces
lowerCamelCase__ = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowerCamelCase__ = unicodedata.normalize('''NFC''' , __lowerCAmelCase )
return text
def __lowerCamelCase ( self , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.preprocess_text(__lowerCAmelCase )
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__lowerCAmelCase )
@staticmethod
def __lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
return out_string
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = ''''''
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
'''simple docstring'''
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = self.preprocess_text(__lowerCAmelCase )
lowerCamelCase__ = self.sp_model.encode(__lowerCAmelCase )
else:
lowerCamelCase__ = [self.preprocess_text(__lowerCAmelCase ) for t in text]
lowerCamelCase__ = self.sp_model.encode(__lowerCAmelCase )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase__ = torch.tensor(__lowerCAmelCase )
return token_ids
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.decode(__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCamelCase__ = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(__lowerCAmelCase ) + F'{self.bos_token}Bot:'
)
return self.encode(text=__lowerCAmelCase )
| 209 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger()
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : LevitConfig , _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : bool = True ) -> Dict:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
UpperCAmelCase_ : Optional[int] = timm.create_model("levit_128s" , pretrained=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Any = timm.create_model("levit_128" , pretrained=_SCREAMING_SNAKE_CASE )
if hidden_sizes == 1_92:
UpperCAmelCase_ : List[Any] = timm.create_model("levit_192" , pretrained=_SCREAMING_SNAKE_CASE )
if hidden_sizes == 2_56:
UpperCAmelCase_ : Optional[int] = timm.create_model("levit_256" , pretrained=_SCREAMING_SNAKE_CASE )
if hidden_sizes == 3_84:
UpperCAmelCase_ : Tuple = timm.create_model("levit_384" , pretrained=_SCREAMING_SNAKE_CASE )
from_model.eval()
UpperCAmelCase_ : Dict = LevitForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ : str = OrderedDict()
UpperCAmelCase_ : str = from_model.state_dict()
UpperCAmelCase_ : Dict = list(from_model.state_dict().keys() )
UpperCAmelCase_ : List[Any] = list(our_model.state_dict().keys() )
print(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Union[str, Any] = weights[og_keys[i]]
our_model.load_state_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = torch.randn((2, 3, 2_24, 2_24) )
UpperCAmelCase_ : Any = from_model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = our_model(_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
UpperCAmelCase_ : List[Any] = name
print(_SCREAMING_SNAKE_CASE )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
UpperCAmelCase_ : Tuple = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def a__ ( _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : str = None , _SCREAMING_SNAKE_CASE : bool = True ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[str] = 10_00
UpperCAmelCase_ : Tuple = (1, num_labels)
UpperCAmelCase_ : List[str] = "huggingface/label-files"
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : Union[str, Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ : int = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ : str = idalabel
UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[int] = partial(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = {
"levit-128S": 1_28,
"levit-128": 1_28,
"levit-192": 1_92,
"levit-256": 2_56,
"levit-384": 3_84,
}
UpperCAmelCase_ : Tuple = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , _SCREAMING_SNAKE_CASE , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 67 |
'''simple docstring'''
from collections.abc import Sequence
def a__ ( _SCREAMING_SNAKE_CASE : Sequence[float] , _SCREAMING_SNAKE_CASE : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(_SCREAMING_SNAKE_CASE ) )
def a__ ( _SCREAMING_SNAKE_CASE : Sequence[float] , _SCREAMING_SNAKE_CASE : float ) -> float:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0.0
for coeff in reversed(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
_lowerCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 67 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__UpperCamelCase = Features({"""audio""": Audio()} )
__UpperCamelCase = Features({"""labels""": ClassLabel} )
__UpperCamelCase = """audio"""
__UpperCamelCase = """labels"""
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[Any] ) -> List[str]:
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def UpperCAmelCase__ ( self :Dict ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 78 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int:
'''simple docstring'''
def get_dataset(snake_case : Optional[int] ):
__UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any:
'''simple docstring'''
__UpperCamelCase = []
for epoch in range(snake_case ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCamelCase , __UpperCamelCase = batch
__UpperCamelCase = model(snake_case )
__UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case )
accelerator.backward(snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self )-> Tuple:
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
return x * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing"
lowercase__ : List[Any] = DummyModel()
lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ : str = dummy_dataloaders()
lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 328 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _UpperCamelCase ( UpperCamelCase__ ):
@wraps(UpperCamelCase__ )
def _inner_fn(*UpperCamelCase__ , **UpperCamelCase__ ):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UpperCamelCase__ , )
return fn(*UpperCamelCase__ , **UpperCamelCase__ )
return _inner_fn | 283 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A ='\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__A ='\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
__A ='\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def _UpperCamelCase ( UpperCamelCase__ ):
def remove_articles(UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(UpperCamelCase__ , """ """ , UpperCamelCase__ )
def white_space_fix(UpperCamelCase__ ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase__ ):
UpperCAmelCase__ : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
return int(normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ ) )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Any = [any(compute_exact(UpperCamelCase__ , UpperCamelCase__ ) for ref in refs ) for pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ )]
return (sum(UpperCamelCase__ ) / len(UpperCamelCase__ )) * 1_0_0
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
UpperCAmelCase__ : List[Any] = Counter(UpperCamelCase__ )
UpperCAmelCase__ : str = Counter(UpperCamelCase__ )
UpperCAmelCase__ : Dict = Counter()
for sgram, scount in sgramcounter.items():
UpperCAmelCase__ : Dict = scount * numref
UpperCAmelCase__ : int = Counter(UpperCamelCase__ )
UpperCAmelCase__ : Optional[int] = Counter()
for cgram, ccount in cgramcounter.items():
UpperCAmelCase__ : Union[str, Any] = ccount * numref
# KEEP
UpperCAmelCase__ : str = sgramcounter_rep & cgramcounter_rep
UpperCAmelCase__ : List[Any] = keepgramcounter_rep & rgramcounter
UpperCAmelCase__ : Dict = sgramcounter_rep & rgramcounter
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Union[str, Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase__ : List[str] = 1
UpperCAmelCase__ : Optional[Any] = 1
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : Optional[int] = keeptmpscorea / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
UpperCAmelCase__ : Any = keeptmpscorea / sum(keepgramcounterall_rep.values() )
UpperCAmelCase__ : Any = 0
if keepscore_precision > 0 or keepscore_recall > 0:
UpperCAmelCase__ : str = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
UpperCAmelCase__ : str = sgramcounter_rep - cgramcounter_rep
UpperCAmelCase__ : Optional[Any] = delgramcounter_rep - rgramcounter
UpperCAmelCase__ : List[str] = sgramcounter_rep - rgramcounter
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : List[Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase__ : Union[str, Any] = 1
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : Optional[Any] = deltmpscorea / len(UpperCamelCase__ )
# ADDITION
UpperCAmelCase__ : Tuple = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
UpperCAmelCase__ : Optional[Any] = set(UpperCamelCase__ ) & set(UpperCamelCase__ )
UpperCAmelCase__ : List[str] = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
UpperCAmelCase__ : str = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : List[Any] = 1
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : Optional[int] = addtmpscore / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : int = addtmpscore / len(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = 0
if addscore_precision > 0 or addscore_recall > 0:
UpperCAmelCase__ : int = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = len(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = ssent.split(""" """ )
UpperCAmelCase__ : Optional[int] = csent.split(""" """ )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : List[Any] = []
for rsent in rsents:
UpperCAmelCase__ : List[str] = rsent.split(""" """ )
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Dict = []
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
UpperCAmelCase__ : Optional[int] = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
UpperCAmelCase__ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
UpperCAmelCase__ : Any = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
UpperCAmelCase__ : Optional[int] = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
UpperCAmelCase__ : Dict = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
UpperCAmelCase__ : str = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
UpperCAmelCase__ : Dict = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
UpperCAmelCase__ : int = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
UpperCAmelCase__ : List[Any] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[Any] = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : str = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Any = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
UpperCAmelCase__ : Union[str, Any] = sum([delascore, delascore, delascore, delascore] ) / 4
UpperCAmelCase__ : Dict = sum([addascore, addascore, addascore, addascore] ) / 4
UpperCAmelCase__ : List[Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ = True , UpperCamelCase__ = "13a" , UpperCamelCase__ = True ):
# Normalization is requried for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
UpperCAmelCase__ : List[str] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
UpperCAmelCase__ : Tuple = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase__ )()(UpperCamelCase__ )
else:
UpperCAmelCase__ : Tuple = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase__ )
elif tokenizer == "moses":
UpperCAmelCase__ : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ , escape=UpperCamelCase__ )
elif tokenizer == "penn":
UpperCAmelCase__ : Dict = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ )
else:
UpperCAmelCase__ : List[Any] = sentence
if not return_str:
UpperCAmelCase__ : List[str] = normalized_sent.split()
return normalized_sent
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if not (len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == len(UpperCamelCase__ )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
UpperCAmelCase__ : Optional[int] = 0
for src, pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
sari_score += SARIsent(normalize(UpperCamelCase__ ) , normalize(UpperCamelCase__ ) , [normalize(UpperCamelCase__ ) for sent in refs] )
UpperCAmelCase__ : Optional[int] = sari_score / len(UpperCamelCase__ )
return 1_0_0 * sari_score
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="exp" , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
UpperCAmelCase__ : int = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
UpperCAmelCase__ : int = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
UpperCAmelCase__ : int = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def snake_case__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = {}
result.update({"""sari""": compute_sari(sources=_lowerCamelCase , predictions=_lowerCamelCase , references=_lowerCamelCase)})
result.update({"""sacrebleu""": compute_sacrebleu(predictions=_lowerCamelCase , references=_lowerCamelCase)})
result.update({"""exact""": compute_em(predictions=_lowerCamelCase , references=_lowerCamelCase)})
return result | 283 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=_UpperCamelCase , )
assert hasattr(self , "env" )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
# distributed data settings
_lowercase : Any = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_UpperCamelCase , instance_count=_UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_UpperCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
TrainingJobAnalytics(_UpperCamelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Any = self.create_estimator(_UpperCamelCase )
# run training
estimator.fit()
# result dataframe
_lowercase : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_lowercase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowercase : Optional[int] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _UpperCamelCase )
| 250 |
'''simple docstring'''
from math import factorial
def _A ( snake_case , snake_case ) -> int:
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(snake_case ) // (factorial(snake_case ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 250 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :List[str] = ['input_values', 'padding_mask']
def __init__( self , A_ = 1 , A_ = 2_4000 , A_ = 0.0 , A_ = None , A_ = None , **A_ , ):
'''simple docstring'''
super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ )
UpperCamelCase : Tuple = chunk_length_s
UpperCamelCase : Dict = overlap
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , A_ , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
UpperCamelCase : str = True
UpperCamelCase : str = bool(
isinstance(A_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : Tuple = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : int = [np.asarray(A_ ).T]
# verify inputs are valid
for idx, example in enumerate(A_ ):
if example.ndim > 2:
raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )
UpperCamelCase : Tuple = None
UpperCamelCase : List[str] = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCamelCase : List[Any] = min(array.shape[0] for array in raw_audio )
UpperCamelCase : Union[str, Any] = int(np.floor(max_length / self.chunk_stride ) )
UpperCamelCase : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCamelCase : List[str] = max(array.shape[0] for array in raw_audio )
UpperCamelCase : Dict = int(np.ceil(max_length / self.chunk_stride ) )
UpperCamelCase : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCamelCase : str = "max_length"
else:
UpperCamelCase : Optional[int] = input_values
# normal padding on batch
if padded_inputs is None:
UpperCamelCase : Union[str, Any] = self.pad(
A_ , max_length=A_ , truncation=A_ , padding=A_ , return_attention_mask=A_ , )
if padding:
UpperCamelCase : Any = padded_inputs.pop("attention_mask" )
UpperCamelCase : Union[str, Any] = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
UpperCamelCase : str = example[..., None]
input_values.append(example.T )
UpperCamelCase : Union[str, Any] = input_values
if return_tensors is not None:
UpperCamelCase : int = padded_inputs.convert_to_tensors(A_ )
return padded_inputs
| 140 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Any = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class A__ ( __snake_case ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ):
'''simple docstring'''
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class A__ ( __snake_case ):
def __init__( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = max_length
UpperCamelCase : Dict = max_position_embeddings
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ):
'''simple docstring'''
UpperCamelCase : int = input_ids.shape[-1]
UpperCamelCase : str = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class A__ ( __snake_case ):
def __init__( self , A_ , A_ ):
'''simple docstring'''
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , A_ , )
UpperCamelCase : Union[str, Any] = start_length
UpperCamelCase : List[str] = max_new_tokens
UpperCamelCase : Tuple = start_length + max_new_tokens
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class A__ ( __snake_case ):
def __init__( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : Optional[int] = max_time
UpperCamelCase : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class A__ ( __snake_case ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ):
'''simple docstring'''
return any(criteria(A_ , A_ ) for criteria in self )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(A_ , A_ ):
return stopping_criterium.max_length
elif isinstance(A_ , A_ ):
return stopping_criterium.max_length
return None
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> StoppingCriteriaList:
UpperCamelCase : Tuple = stopping_criteria.max_length
UpperCamelCase : Union[str, Any] = deepcopy(_lowerCAmelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCAmelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCAmelCase ) )
return new_stopping_criteria
| 140 | 1 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__="" , lowerCAmelCase__="train"):
assert os.path.isdir(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = os.listdir(lowerCAmelCase__)
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
if not os.path.isfile(lowerCAmelCase__):
continue
self.documents.append(lowerCAmelCase__)
def __len__( self):
return len(self.documents)
def __getitem__( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.documents[idx]
__SCREAMING_SNAKE_CASE = document_path.split("""/""")[-1]
with open(lowerCAmelCase__ , encoding="""utf-8""") as source:
__SCREAMING_SNAKE_CASE = source.read()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = process_story(lowerCAmelCase__)
return document_name, story_lines, summary_lines
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = list(filter(lambda UpperCamelCase_ : len(UpperCamelCase_ ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) )
# for some unknown reason some lines miss a period, add it
__SCREAMING_SNAKE_CASE = [_add_missing_period(UpperCamelCase_ ) for line in nonempty_lines]
# gather article lines
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = deque(UpperCamelCase_ )
while True:
try:
__SCREAMING_SNAKE_CASE = lines.popleft()
if element.startswith("""@highlight""" ):
break
story_lines.append(UpperCamelCase_ )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__SCREAMING_SNAKE_CASE = list(filter(lambda UpperCamelCase_ : not t.startswith("""@highlight""" ) , UpperCamelCase_ ) )
return story_lines, summary_lines
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
if line.startswith("""@highlight""" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if len(UpperCamelCase_ ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(UpperCamelCase_ )) )
return sequence
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = torch.ones_like(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = sequence == pad_token_id
__SCREAMING_SNAKE_CASE = 0
return mask
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [tokenizer.encode(UpperCamelCase_ ) for line in story_lines]
__SCREAMING_SNAKE_CASE = [token for sentence in story_lines_token_ids for token in sentence]
__SCREAMING_SNAKE_CASE = [tokenizer.encode(UpperCamelCase_ ) for line in summary_lines]
__SCREAMING_SNAKE_CASE = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
for sequence in batch:
__SCREAMING_SNAKE_CASE = -1
__SCREAMING_SNAKE_CASE = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ )
| 100 |
"""simple docstring"""
from math import isqrt, loga
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = False
return [i for i in range(2 , UpperCamelCase_ ) if is_prime[i]]
def _lowerCAmelCase ( UpperCamelCase_ = 80_0800 , UpperCamelCase_ = 80_0800 ):
__SCREAMING_SNAKE_CASE = degree * loga(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = int(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = calculate_prime_numbers(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 | 1 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def _lowercase ( self : Dict , __A : Optional[Any]=0 ):
snake_case__ : Union[str, Any] = np.random.RandomState(__A )
snake_case__ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Union[str, Any] = self.get_dummy_inputs()
snake_case__ : int = pipe(**__A ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Optional[int] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Optional[int] ):
snake_case__ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[Any] = self.get_dummy_inputs()
snake_case__ : str = pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : List[Any] ):
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Dict = self.get_dummy_inputs()
snake_case__ : Any = pipe(**__A ).images
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Tuple = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : List[str] ):
snake_case__ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : int = self.get_dummy_inputs()
snake_case__ : Tuple = pipe(**__A ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Any = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Dict ):
snake_case__ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[Any] = self.get_dummy_inputs()
snake_case__ : Dict = pipe(**__A ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : int ):
snake_case__ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs()
snake_case__ : List[str] = pipe(**__A ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Union[str, Any] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Any ):
snake_case__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Union[str, Any] = self.get_dummy_inputs()
snake_case__ : List[Any] = 3 * [inputs["prompt"]]
# forward
snake_case__ : Tuple = pipe(**__A )
snake_case__ : str = output.images[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = self.get_dummy_inputs()
snake_case__ : Any = 3 * [inputs.pop("prompt" )]
snake_case__ : List[str] = pipe.tokenizer(
__A , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=__A , return_tensors="np" , )
snake_case__ : str = text_inputs["input_ids"]
snake_case__ : List[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
snake_case__ : Union[str, Any] = prompt_embeds
# forward
snake_case__ : Any = pipe(**__A )
snake_case__ : List[str] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _lowercase ( self : Tuple ):
snake_case__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[int] = self.get_dummy_inputs()
snake_case__ : List[str] = 3 * ["this is a negative prompt"]
snake_case__ : List[Any] = negative_prompt
snake_case__ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
snake_case__ : Optional[int] = pipe(**__A )
snake_case__ : List[str] = output.images[0, -3:, -3:, -1]
snake_case__ : str = self.get_dummy_inputs()
snake_case__ : str = 3 * [inputs.pop("prompt" )]
snake_case__ : Optional[int] = []
for p in [prompt, negative_prompt]:
snake_case__ : int = pipe.tokenizer(
__A , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=__A , return_tensors="np" , )
snake_case__ : int = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
snake_case__, snake_case__ : str = embeds
# forward
snake_case__ : Any = pipe(**__A )
snake_case__ : str = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ):
snake_case__ : int = ort.SessionOptions()
snake_case__ : Union[str, Any] = False
return options
def _lowercase ( self : Tuple ):
# using the PNDM scheduler by default
snake_case__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[Any] = "A painting of a squirrel eating a burger"
np.random.seed(0 )
snake_case__ : List[str] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="np" )
snake_case__ : str = output.images
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : str = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[str] = "open neural network exchange"
snake_case__ : int = np.random.RandomState(0 )
snake_case__ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__A , output_type="np" )
snake_case__ : Optional[int] = output.images
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : str ):
snake_case__ : Dict = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
snake_case__ : Any = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[Any] = "open neural network exchange"
snake_case__ : int = np.random.RandomState(0 )
snake_case__ : List[str] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__A , output_type="np" )
snake_case__ : int = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Dict = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
snake_case__ : Union[str, Any] = 0
def test_callback_fn(__A : int , __A : int , __A : np.ndarray ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : List[Any] = latents[0, -3:, -3:, -1]
snake_case__ : int = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : Union[str, Any] = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
snake_case__ : Dict = False
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = "Andromeda galaxy in a bottle"
snake_case__ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=__A , num_inference_steps=5 , guidance_scale=7.5 , generator=__A , callback=__A , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _lowercase ( self : List[Any] ):
snake_case__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__A , __A )
assert pipe.safety_checker is None
snake_case__ : Union[str, Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
snake_case__ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(__A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case__ : Tuple = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
| 286 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = PhobertTokenizer
a_ = False
def _lowercase ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Optional[int] = ["T@@", "i", "I", "R@@", "r", "e@@"]
snake_case__ : int = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Dict = ["#version: 0.2", "l à</w>"]
snake_case__ : Optional[Any] = {"unk_token": "<unk>"}
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def _lowercase ( self : List[str] , **__A : Any ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__A )
def _lowercase ( self : Tuple , __A : List[Any] ):
snake_case__ : str = "Tôi là VinAI Research"
snake_case__ : int = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def _lowercase ( self : Optional[int] ):
snake_case__ : int = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = "Tôi là VinAI Research"
snake_case__ : List[Any] = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
snake_case__ : int = tokenizer.tokenize(__A )
print(__A )
self.assertListEqual(__A , __A )
snake_case__ : Any = tokens + [tokenizer.unk_token]
snake_case__ : Any = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
| 286 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_A : Optional[int] ='''hf-internal-testing/tiny-random-bert'''
_A : Union[str, Any] =os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
_A : Optional[Any] ='''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
lowerCamelCase__ : int = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
lowerCamelCase__ : Union[str, Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
lowerCamelCase__ : str = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""9b8c223""" )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase_ ( self: List[Any] ):
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
lowerCamelCase__ : Tuple = cached_file("""tiny-random-bert""" , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
lowerCamelCase__ : List[str] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""aaaa""" )
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
lowerCamelCase__ : str = cached_file(UpperCamelCase__ , """conf""" )
def lowerCamelCase_ ( self: Optional[int] ):
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
lowerCamelCase__ : Any = cached_file(UpperCamelCase__ , """conf""" )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
lowerCamelCase__ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , """.no_exist""" , UpperCamelCase__ , """conf""" ) ) )
lowerCamelCase__ : Optional[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : Dict = cached_file(UpperCamelCase__ , """conf""" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = mock.Mock()
lowerCamelCase__ : str = 500
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : Union[str, Any] = HTTPError
lowerCamelCase__ : List[str] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase__ ) as mock_head:
lowerCamelCase__ : List[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self: Dict ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Optional[Any] ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ , revision="""ahaha""" )
lowerCamelCase__ : Tuple = get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCamelCase__ : str = json.loads(open(UpperCamelCase__ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def lowerCamelCase_ ( self: List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : int = Path(UpperCamelCase__ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , """a.txt""" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , """b.txt""" ) )
| 41 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 314 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Any:
snake_case : Dict = nn.functional.normalize(lowercase )
snake_case : str = nn.functional.normalize(lowercase )
return torch.mm(lowercase ,normalized_text_embeds.t() )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = CLIPConfig
_snake_case = ["""CLIPEncoderLayer"""]
def __init__( self , A ) -> Optional[Any]:
super().__init__(A )
snake_case : List[Any] = CLIPVisionModel(config.vision_config )
snake_case : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=A )
snake_case : Optional[int] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=A )
snake_case : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=A )
snake_case : List[Any] = nn.Parameter(torch.ones(1_7 ) , requires_grad=A )
snake_case : Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=A )
@torch.no_grad()
def UpperCAmelCase ( self , A , A ) -> Optional[Any]:
snake_case : List[str] = self.vision_model(A )[1] # pooled_output
snake_case : Union[str, Any] = self.visual_projection(A )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case : str = cosine_distance(A , self.special_care_embeds ).cpu().float().numpy()
snake_case : Dict = cosine_distance(A , self.concept_embeds ).cpu().float().numpy()
snake_case : Optional[Any] = []
snake_case : int = image_embeds.shape[0]
for i in range(A ):
snake_case : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case : Tuple = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case : Tuple = special_cos_dist[i][concept_idx]
snake_case : Optional[int] = self.special_care_embeds_weights[concept_idx].item()
snake_case : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
snake_case : Tuple = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case : str = cos_dist[i][concept_idx]
snake_case : str = self.concept_embeds_weights[concept_idx].item()
snake_case : Optional[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(A )
result.append(A )
snake_case : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase ( self , A , A ) -> Union[str, Any]:
snake_case : str = self.vision_model(A )[1] # pooled_output
snake_case : Any = self.visual_projection(A )
snake_case : Optional[Any] = cosine_distance(A , self.special_care_embeds )
snake_case : Tuple = cosine_distance(A , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case : Union[str, Any] = 0.0
snake_case : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case : List[Any] = torch.any(special_scores > 0 , dim=1 )
snake_case : Optional[Any] = special_care * 0.01
snake_case : Optional[Any] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 363 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count for the first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 176 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
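# Hypothetical invocation sketch (the prompt and paths are illustrative, not from the
# original script): downloads up to 200 LAION images matching the prompt into
# ./real_images/images, plus caption.txt / urls.txt / images.txt manifests beside them:
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_images --num_class_images 200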
| 254 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_path: str, dest_path: str, n: int):
    """Write the first n lines of each file f in src_path to dest_path/f."""
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
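# Hypothetical CLI sketch via fire (directory names are illustrative): keep only the
# first 100 lines of every file under ./data, writing truncated copies to ./data_min:
#   python minify.py ./data ./data_min 100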
| 254 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 366 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        '''simple docstring'''
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse}
| 296 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators across a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
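# A minimal usage sketch (the values shown are illustrative, from running the
# functions above; not part of the original file):
#   >>> generate_pascal_triangle(4)
#   [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#   >>> print_pascal_triangle(3)
#     1
#    1 1
#   1 2 1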
| 235 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 235 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase : Optional[int] = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 352 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171 | 0 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the minimum and maximum of the remaining
    items and growing the result from both ends (a min/max selection strategy,
    despite the function's name)."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 202 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 202 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 83 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
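# A minimal usage sketch (values are illustrative, not from the original file):
#   ps = PrefixSum([1, 2, 3, 4])
#   ps.get_sum(1, 3)        # 2 + 3 + 4 == 9
#   ps.contains_sum(6)      # True: the contiguous slice [1, 2, 3] sums to 6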
| 83 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 24 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 188 | 0 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCamelCase_ ( *lowerCAmelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase_ : str = list(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
lowerCAmelCase_ : Tuple = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """simple docstring"""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """simple docstring"""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f" Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
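if __name__ == "__main__":
    # Illustrative demo (added; not part of the original module; assumes the
    # package context, e.g. `python -m accelerate.utils.memory`). The decorated
    # function receives the current batch size as its first argument; a CUDA-OOM
    # style RuntimeError makes the decorator retry with the batch size halved.
    @find_executable_batch_size(starting_batch_size=8)
    def train(batch_size):
        if batch_size > 2:
            raise RuntimeError("CUDA out of memory.")  # simulated OOM
        return batch_size

    assert train() == 2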
| 289 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase__ : List[Any] = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """masked_bert"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : int=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1E-12 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Optional[int]="constant" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Any , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : str = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : Dict = type_vocab_size
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : List[Any] = layer_norm_eps
lowerCAmelCase_ : str = pruning_method
lowerCAmelCase_ : Optional[Any] = mask_init
lowerCAmelCase_ : int = mask_scale
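if __name__ == "__main__":
    # Illustrative check (added; not part of the original module): the config
    # carries the pruning-specific fields next to the usual BERT hyperparameters.
    config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
    print(config.model_type, config.pruning_method, config.hidden_size)  # masked_bert topK 768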
| 289 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # Copy, for every destination pixel, the nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 161 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 161 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 370 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """simple docstring"""

    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 43 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 59 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
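# Worked example (added, illustrative): sigmoid(0) == 0.5, so
# sigmoid_linear_unit(0) == 0.0; for x = 1.0 the gated unit gives
# 1.0 * sigmoid(1.702) ~= 0.8458, close to GELU(1.0) ~= 0.8413.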
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 189 | 0 |
from ....utils import logging
_A = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 117 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 117 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__a: Optional[int] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
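if __name__ == "__main__":
    # Illustrative demo (added; not part of the original module; assumes the
    # package context). The snippet's class name is kept as-is; it mirrors the
    # CLIP-style image processors in transformers.
    from PIL import Image as PILImage

    dummy = PILImage.fromarray(np.zeros((32, 48, 3), dtype=np.uint8))
    processor = UpperCAmelCase()
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)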
| 198 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
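if __name__ == "__main__":
    # Illustrative sketch (added; not part of the original module): the usual
    # entry point is the `pipeline` factory. The checkpoint and image URL below
    # are the standard documentation examples and require network access.
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    outputs["depth"].save("depth.png")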
| 330 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 351 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 145 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 99 |
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f'{self.data}'

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_last_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value(self, value: int) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def UpperCamelCase_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
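# Illustrative usage (added; not part of the original snippet):
#
#     >>> linked_list = LinkedList()
#     >>> for value in (1, 2, 3):
#     ...     linked_list.insert(value)
#     >>> str(linked_list)
#     '1 2 3'
#     >>> linked_list.delete_value(2)
#     >>> str(linked_list)
#     '1 3'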
| 120 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the question-answering/sequence-classification
        # heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # A dummy image is used, so OCR is disabled on the processor.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework)
        )

        return inputs
| 347 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a = bbox[i, j, 3]
a = bbox[i, j, 1]
a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a = bbox[i, j, 2]
a = bbox[i, j, 0]
a = t
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.text_seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ):
'''simple docstring'''
a = LayoutLMvaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# text + image
a = model(__magic_name__ , pixel_values=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a = model(pixel_values=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.num_labels
a = LayoutLMvaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
        ( a , a , a , a , a , a , a , a ) = config_and_inputs
a = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ):
'''simple docstring'''
return True
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = LayoutLMvaModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ):
'''simple docstring'''
a = copy.deepcopy(__magic_name__ )
if model_class in get_values(__magic_name__ ):
a = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__magic_name__ ):
a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in get_values(__magic_name__ ):
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in [
*get_values(__magic_name__ ),
]:
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
elif model_class in [
*get_values(__magic_name__ ),
]:
a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , )
return inputs_dict
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = LayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( ) -> str:
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ )
a = torch.tensor([[1, 2]] )
a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
a = model(
input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , )
# verify the logits
a = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
a = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
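# Hedged standalone illustration of the tolerance check used in the integration
# test above: `torch.allclose` with atol=1e-4 accepts elementwise deviations of
# roughly that magnitude and rejects larger ones.
import torch

a = torch.tensor([1.00005, 2.0])
b = torch.tensor([1.0, 2.0])
assert torch.allclose(a, b, atol=1e-4)
assert not torch.allclose(a + 0.01, b, atol=1e-4)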
| 347 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ : Any = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ : str = BartphoTokenizer
lowercase_ : int = False
lowercase_ : str = True
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : List[Any] = ['▁This', '▁is', '▁a', '▁t', 'est']
A_ : Optional[int] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
A_ : List[Any] = {'unk_token': '<unk>'}
A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
A_ : Union[str, Any] = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = 'This is a là test'
A_ : List[Any] = 'This is a<unk><unk> test'
return input_text, output_text
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
A_ : Optional[Any] = 'This is a là test'
A_ : List[Any] = '▁This ▁is ▁a ▁l à ▁t est'.split()
A_ : int = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
A_ : Tuple = tokens + [tokenizer.unk_token]
A_ : Dict = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
| 286 |
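# Hedged illustration of the unk-token behaviour checked in the tokenizer test
# above, reproduced with a plain dict (vocabulary and ids taken from that test).
vocab = {"▁This": 4, "▁is": 5, "▁a": 6, "▁t": 7, "est": 8}
unk_id = 3  # id of "<unk>" in the toy vocabulary
tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
ids = [vocab.get(t, unk_id) for t in tokens]
assert ids == [4, 5, 6, 3, 3, 7, 8]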
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 286 | 1 |
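# Hedged usage sketch for the Fenwick (binary indexed) tree above, written as a
# compact self-contained 1-indexed variant so it can run on its own.
class FenwickSketch:
    def __init__(self, size):
        self.size = size
        self.tree = [0] * (size + 1)

    def add(self, i, value):  # 0-indexed point update
        i += 1
        while i <= self.size:
            self.tree[i] += value
            i += i & -i

    def prefix(self, i):  # sum of elements [0, i)
        s = 0
        while i > 0:
            s += self.tree[i]
            i -= i & -i
        return s

f = FenwickSketch(5)
f.add(2, 7)
f.add(4, 3)
assert f.prefix(3) == 7 and f.prefix(5) == 10 and f.prefix(2) == 0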
import qiskit
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
__UpperCamelCase :str = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__UpperCamelCase :str = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
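# Hedged companion sketch: the same measure-and-count pattern extended to a
# two-qubit Bell state, using the qiskit calls already imported above.
def bell_state_counts(shots: int = 1_000):
    backend = qiskit.Aer.get_backend('''aer_simulator''')
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)       # put qubit 0 into superposition
    circuit.cx(0, 1)   # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, backend, shots=shots)
    return job.result().get_counts(circuit)  # expect roughly half '00', half '11'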
| 362 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :int = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=SCREAMING_SNAKE_CASE , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=SCREAMING_SNAKE_CASE )
return parser.parse_args()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Tuple = parse_args()
# Import training_script as a module.
__UpperCamelCase :Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__UpperCamelCase :Any = script_fpath.stem
__UpperCamelCase :Union[str, Any] = importlib.import_module(SCREAMING_SNAKE_CASE )
# Patch sys.argv
__UpperCamelCase :Dict = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
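# Hedged sketch of the argv-patching idea used above, isolated so it can run
# without TPUs: temporarily swap sys.argv around a callable, then restore it.
import sys

def run_with_argv(fn, argv):
    old_argv = sys.argv
    sys.argv = argv
    try:
        return fn()
    finally:
        sys.argv = old_argv

# The callee sees the patched argv; the caller's argv is restored afterwards.
assert run_with_argv(lambda: sys.argv[1:], ['prog', '--tpu_num_cores', '8']) == ['--tpu_num_cores', '8']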
| 105 | 0 |
from __future__ import annotations
import pandas as pd
def __lowercase ( _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = [0] * no_of_processes
SCREAMING_SNAKE_CASE : Union[str, Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : int = burst_time[i]
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Tuple = 999999999
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Tuple = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(UpperCamelCase__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
SCREAMING_SNAKE_CASE : Optional[int] = remaining_time[j]
SCREAMING_SNAKE_CASE : List[Any] = j
SCREAMING_SNAKE_CASE : List[str] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
SCREAMING_SNAKE_CASE : str = remaining_time[short]
if minm == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = 999999999
if remaining_time[short] == 0:
complete += 1
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Find finish time of current process
SCREAMING_SNAKE_CASE : Any = increment_time + 1
# Calculate waiting time
SCREAMING_SNAKE_CASE : Optional[Any] = finish_time - arrival_time[short]
SCREAMING_SNAKE_CASE : Optional[int] = finar - burst_time[short]
if waiting_time[short] < 0:
SCREAMING_SNAKE_CASE : str = 0
# Increment time
increment_time += 1
return waiting_time
def __lowercase ( _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = [0] * no_of_processes
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def __lowercase ( _A , _A , _A ) -> int:
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Any = total_waiting_time + waiting_time[i]
SCREAMING_SNAKE_CASE : Tuple = total_turn_around_time + turn_around_time[i]
print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
UpperCAmelCase__ : Tuple = int(input())
UpperCAmelCase__ : Optional[Any] = [0] * no_of_processes
UpperCAmelCase__ : Optional[Any] = [0] * no_of_processes
UpperCAmelCase__ : Optional[int] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = map(int, input().split())
UpperCAmelCase__ : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCAmelCase__ : int = burst_time
UpperCAmelCase__ : Union[str, Any] = no_of_processes
UpperCAmelCase__ : List[Any] = waiting_time
UpperCAmelCase__ : List[str] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
UpperCAmelCase__ : Dict = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
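# Hedged self-contained SRTF reference with a tiny worked example, independent
# of the interactive script above (same preemptive shortest-remaining-time rule).
def srtf_waiting_times(arrival, burst):
    n = len(arrival)
    remaining = list(burst)
    waiting = [0] * n
    finished, t = 0, 0
    while finished < n:
        ready = [i for i in range(n) if arrival[i] <= t and remaining[i] > 0]
        if not ready:
            t += 1
            continue
        i = min(ready, key=lambda k: remaining[k])  # shortest remaining time first
        remaining[i] -= 1
        t += 1
        if remaining[i] == 0:
            finished += 1
            waiting[i] = t - arrival[i] - burst[i]
    return waiting

# Process 1 arrives at t=1 with burst 1 and preempts process 0.
assert srtf_waiting_times([0, 1], [3, 1]) == [1, 0]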
| 245 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =AlbertConfig.from_json_file(UpperCamelCase__ )
print(f"Building PyTorch model from configuration: {config}" )
SCREAMING_SNAKE_CASE__ : Any =AlbertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict(), UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 152 | 0 |
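# Hypothetical invocation of the conversion script above (script name and all
# paths are placeholders, not real files):
#
#   python convert_tf_checkpoint.py \
#     --tf_checkpoint_path /tmp/albert/model.ckpt-best \
#     --albert_config_file /tmp/albert/albert_config.json \
#     --pytorch_dump_path /tmp/albert/pytorch_model.bin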
from collections import defaultdict
class lowerCamelCase_ :
def __init__( self : str ,__lowerCamelCase : List[Any] ,__lowerCamelCase : int ):
'''simple docstring'''
a = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
a = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(__lowerCamelCase ) )
]
a = defaultdict(__lowerCamelCase ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
a = (1 << len(__lowerCamelCase )) - 1
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : List[str] ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
a = self.count_ways_until(__lowerCamelCase ,task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) ,task_no + 1 )
# save the value.
a = total_ways_util
return self.dp[mask][task_no]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : List[Any] ):
'''simple docstring'''
for i in range(len(__lowerCamelCase ) ):
for j in task_performed[i]:
self.task[j].append(__lowerCamelCase )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 ,1 )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
UpperCamelCase__ : int = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
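# Hedged brute-force cross-check of the bitmask DP above, for the same instance
# (5 tasks, persons' allowed tasks as listed). It counts assignments in which
# every person receives exactly one distinct task they can perform.
from itertools import permutations

def count_assignments_brute_force(task_performed, total_tasks):
    persons = len(task_performed)
    count = 0
    for tasks in permutations(range(1, total_tasks + 1), persons):
        if all(t in task_performed[p] for p, t in enumerate(tasks)):
            count += 1
    return count

assert count_assignments_brute_force([[1, 3, 4], [1, 2, 5], [3, 4]], 5) == 10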
| 330 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a_ )
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
SCREAMING_SNAKE_CASE_ = Features({'text': Value('string' )} )
SCREAMING_SNAKE_CASE_ = Features({} )
SCREAMING_SNAKE_CASE_ = "text"
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
return {self.text_column: "text"}
| 330 | 1 |
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {} # Mapping from char to TrieNode
__SCREAMING_SNAKE_CASE = False
def _A ( self , _A ):
'''simple docstring'''
for word in words:
self.insert(_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self
for char in word:
if char not in curr.nodes:
__SCREAMING_SNAKE_CASE = TrieNode()
__SCREAMING_SNAKE_CASE = curr.nodes[char]
__SCREAMING_SNAKE_CASE = True
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self
for char in word:
if char not in curr.nodes:
return False
__SCREAMING_SNAKE_CASE = curr.nodes[char]
return curr.is_leaf
def _A ( self , _A ):
'''simple docstring'''
def _delete(_A , _A , _A ) -> bool:
if index == len(_A ):
# If word does not exist
if not curr.is_leaf:
return False
__SCREAMING_SNAKE_CASE = False
return len(curr.nodes ) == 0
__SCREAMING_SNAKE_CASE = word[index]
__SCREAMING_SNAKE_CASE = curr.nodes.get(_A )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
__SCREAMING_SNAKE_CASE = _delete(_A , _A , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _A , 0 )
def __lowercase ( a__ , a__ ) -> None:
if node.is_leaf:
print(a__ , end=' ' )
for key, value in node.nodes.items():
print_words(a__ , word + key )
def __lowercase ( ) -> bool:
__SCREAMING_SNAKE_CASE = 'banana bananas bandana band apple all beast'.split()
__SCREAMING_SNAKE_CASE = TrieNode()
root.insert_many(a__ )
# print_words(root, "")
assert all(root.find(a__ ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __lowercase ( a__ , a__ ) -> None:
print(str(a__ ) , 'works!' if passes else 'doesn\'t work :(' )
def __lowercase ( ) -> None:
assert test_trie()
def __lowercase ( ) -> None:
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
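# Hedged extension sketch: prefix completion on a plain dict-based trie,
# independent of the class above (uses "$" as the end-of-word marker).
def trie_insert(root, word):
    node = root
    for ch in word:
        node = node.setdefault(ch, {})
    node['$'] = True

def words_with_prefix(root, prefix):
    node = root
    for ch in prefix:
        if ch not in node:
            return []
        node = node[ch]
    found = []
    def walk(n, acc):
        for key, child in n.items():
            if key == '$':
                found.append(prefix + acc)
            else:
                walk(child, acc + key)
    walk(node, '')
    return found

root = {}
for w in ('band', 'bandana', 'banana'):
    trie_insert(root, w)
assert sorted(words_with_prefix(root, 'band')) == ['band', 'bandana']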
| 257 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : str ={
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = '''unispeech-sat'''
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3_072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=1e-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.0_5 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1_500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ):
'''simple docstring'''
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = num_clusters
__SCREAMING_SNAKE_CASE = do_stable_layer_norm
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE = num_codevectors_per_group
__SCREAMING_SNAKE_CASE = num_codevector_groups
__SCREAMING_SNAKE_CASE = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE = num_negatives
__SCREAMING_SNAKE_CASE = codevector_dim
__SCREAMING_SNAKE_CASE = proj_codevector_dim
__SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def _A ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
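# Hedged standalone check of the property above: the overall feature-extractor
# stride is the product of the per-layer conv strides (320 for these defaults).
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320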
| 257 | 1 |
_lowerCamelCase : int = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
_lowerCamelCase : Dict = frozenset(['''prompt''', '''negative_prompt'''])
_lowerCamelCase : List[Any] = frozenset([])
_lowerCamelCase : Union[str, Any] = frozenset(['''image'''])
_lowerCamelCase : List[str] = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
_lowerCamelCase : List[Any] = frozenset(['''image'''])
_lowerCamelCase : Tuple = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
_lowerCamelCase : Any = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
_lowerCamelCase : List[Any] = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
_lowerCamelCase : Tuple = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
_lowerCamelCase : Union[str, Any] = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
_lowerCamelCase : Any = frozenset(['''image''', '''mask_image'''])
_lowerCamelCase : Dict = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
_lowerCamelCase : str = frozenset(['''example_image''', '''image''', '''mask_image'''])
_lowerCamelCase : int = frozenset(['''class_labels'''])
_lowerCamelCase : List[Any] = frozenset(['''class_labels'''])
_lowerCamelCase : str = frozenset(['''batch_size'''])
_lowerCamelCase : Union[str, Any] = frozenset([])
_lowerCamelCase : Any = frozenset(['''batch_size'''])
_lowerCamelCase : Union[str, Any] = frozenset([])
_lowerCamelCase : Optional[int] = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
_lowerCamelCase : List[Any] = frozenset(['''prompt''', '''negative_prompt'''])
_lowerCamelCase : Optional[Any] = frozenset(['''input_tokens'''])
_lowerCamelCase : Optional[int] = frozenset(['''input_tokens'''])
| 130 |
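# Hedged sketch of how parameter frozensets like the ones above are typically
# used: validating the argument names passed to a pipeline call.
def validate_call_params(passed_names, required, optional=frozenset()):
    missing = set(required) - set(passed_names)
    if missing:
        raise ValueError(f"missing required arguments: {sorted(missing)}")
    unexpected = set(passed_names) - set(required) - set(optional)
    if unexpected:
        raise ValueError(f"unexpected arguments: {sorted(unexpected)}")

validate_call_params({'prompt', 'guidance_scale'},
                     required=frozenset({'prompt'}),
                     optional=frozenset({'guidance_scale', 'negative_prompt'}))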
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Dict = '''PoolFormerConfig'''
# Base docstring
_lowerCamelCase : int = '''sail/poolformer_s12'''
_lowerCamelCase : Optional[Any] = [1, 512, 7, 7]
# Image classification docstring
_lowerCamelCase : Optional[int] = '''sail/poolformer_s12'''
_lowerCamelCase : List[Any] = '''tabby, tabby cat'''
_lowerCamelCase : List[str] = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def a_ ( __lowercase : List[Any] , __lowercase : float = 0.0 , __lowercase : bool = False ) -> Optional[int]:
if drop_prob == 0.0 or not training:
return input
_snake_case = 1 - drop_prob
_snake_case = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_snake_case = keep_prob + torch.rand(__lowercase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_snake_case = input.div(__lowercase ) * random_tensor
return output
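# Hedged sanity check of the stochastic-depth helper above, rewritten
# self-contained: in eval mode the input passes through unchanged, and in
# training mode surviving samples are rescaled by 1/keep_prob.
def drop_path_reference(x, drop_prob=0.0, training=False):
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one mask entry per sample
    mask = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()
    return x.div(keep_prob) * mask

_x = torch.ones(4, 3)
assert torch.equal(drop_path_reference(_x, 0.5, training=False), _x)
_out = drop_path_reference(_x, 0.5, training=True)
assert set(_out.unique().tolist()) <= {0.0, 2.0}  # rows are either dropped or scaled to 2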
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Optional[float] = None ):
'''simple docstring'''
super().__init__()
_snake_case = drop_prob
def A ( self : Any , lowercase : torch.Tensor ):
'''simple docstring'''
return drop_path(lowercase , self.drop_prob , self.training )
def A ( self : Tuple ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Dict , lowercase : Dict , lowercase : str , lowercase : int , lowercase : Optional[Any] , lowercase : str=None ):
'''simple docstring'''
super().__init__()
_snake_case = patch_size if isinstance(lowercase , collections.abc.Iterable ) else (patch_size, patch_size)
_snake_case = stride if isinstance(lowercase , collections.abc.Iterable ) else (stride, stride)
_snake_case = padding if isinstance(lowercase , collections.abc.Iterable ) else (padding, padding)
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase )
_snake_case = norm_layer(lowercase ) if norm_layer else nn.Identity()
def A ( self : int , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.projection(lowercase )
_snake_case = self.norm(lowercase )
return embeddings
class SCREAMING_SNAKE_CASE__ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Dict , lowercase : List[Any] , **lowercase : str ):
'''simple docstring'''
super().__init__(1 , lowercase , **lowercase )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowercase : List[Any] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.AvgPoolad(lowercase , stride=1 , padding=pool_size // 2 , count_include_pad=lowercase )
def A ( self : int , lowercase : List[str] ):
'''simple docstring'''
return self.pool(lowercase ) - hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Tuple , lowercase : str , lowercase : Optional[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , 1 )
_snake_case = nn.Convad(lowercase , lowercase , 1 )
_snake_case = PoolFormerDropPath(lowercase )
if isinstance(config.hidden_act , lowercase ):
_snake_case = ACTaFN[config.hidden_act]
else:
_snake_case = config.hidden_act
def A ( self : Optional[int] , lowercase : str ):
'''simple docstring'''
_snake_case = self.conva(lowercase )
_snake_case = self.act_fn(lowercase )
_snake_case = self.drop(lowercase )
_snake_case = self.conva(lowercase )
_snake_case = self.drop(lowercase )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , lowercase : Tuple , lowercase : int , lowercase : str , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case = PoolFormerPooling(lowercase )
_snake_case = PoolFormerOutput(lowercase , lowercase , lowercase , lowercase )
_snake_case = PoolFormerGroupNorm(lowercase )
_snake_case = PoolFormerGroupNorm(lowercase )
# Useful for training neural nets
_snake_case = PoolFormerDropPath(lowercase ) if drop_path > 0.0 else nn.Identity()
_snake_case = config.use_layer_scale
if config.use_layer_scale:
_snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase) ) , requires_grad=lowercase )
_snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase) ) , requires_grad=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] ):
'''simple docstring'''
if self.use_layer_scale:
_snake_case = self.pooling(self.before_norm(lowercase ) )
_snake_case = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_snake_case = hidden_states + self.drop_path(lowercase )
_snake_case = ()
_snake_case = self.output(self.after_norm(lowercase ) )
_snake_case = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_snake_case = hidden_states + self.drop_path(lowercase )
_snake_case = (output,) + outputs
return outputs
else:
_snake_case = self.drop_path(self.pooling(self.before_norm(lowercase ) ) )
# First residual connection
_snake_case = pooling_output + hidden_states
_snake_case = ()
# Second residual connection inside the PoolFormerOutput block
_snake_case = self.drop_path(self.output(self.after_norm(lowercase ) ) )
_snake_case = hidden_states + layer_output
_snake_case = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
super().__init__()
_snake_case = config
# stochastic depth decay rule
_snake_case = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_snake_case = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_snake_case = nn.ModuleList(lowercase )
# Transformer blocks
_snake_case = []
_snake_case = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_snake_case = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
lowercase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(lowercase ) )
_snake_case = nn.ModuleList(lowercase )
def A ( self : Any , lowercase : List[str] , lowercase : str=False , lowercase : Tuple=True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
_snake_case = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_snake_case , _snake_case = layers
# Get patch embeddings from hidden_states
_snake_case = embedding_layer(lowercase )
# Send the embeddings through the blocks
for _, blk in enumerate(lowercase ):
_snake_case = blk(lowercase )
_snake_case = layer_outputs[0]
if output_hidden_states:
_snake_case = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PoolFormerConfig
_UpperCAmelCase : Optional[int] = "poolformer"
_UpperCAmelCase : str = "pixel_values"
_UpperCAmelCase : int = True
def A ( self : Tuple , lowercase : str ):
'''simple docstring'''
if isinstance(lowercase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def A ( self : Optional[Any] , lowercase : str , lowercase : Dict=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : Optional[Any] = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : Tuple = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , lowercase : List[Any] ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = PoolFormerEncoder(lowercase )
# Initialize weights and apply final processing
self.post_init()
def A ( self : List[str] ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Tuple , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , )
_snake_case = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Linear(config.hidden_size , config.hidden_size )
def A ( self : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = self.dense(lowercase )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = PoolFormerModel(lowercase )
# Final norm
_snake_case = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_snake_case = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.poolformer(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , )
_snake_case = outputs[0]
_snake_case = self.classifier(self.norm(lowercase ).mean([-2, -1] ) )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
| 130 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __lowercase ( __lowercase , __lowercase=None ) -> List[Any]:
'''simple docstring'''
_A = None
if token is not None:
_A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A = requests.get(__lowercase , headers=__lowercase ).json()
_A = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_A = math.ceil((result["total_count"] - 100) / 100 )
for i in range(__lowercase ):
_A = requests.get(url + F'''&page={i + 2}''' , headers=__lowercase ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __lowercase ( __lowercase , __lowercase=None ) -> Union[str, Any]:
'''simple docstring'''
_A = None
if token is not None:
_A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
_A = requests.get(__lowercase , headers=__lowercase ).json()
_A = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
_A = math.ceil((result["total_count"] - 100) / 100 )
for i in range(__lowercase ):
_A = requests.get(url + F'''&page={i + 2}''' , headers=__lowercase ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
'''simple docstring'''
_A = None
if token is not None:
_A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_A = requests.get(__lowercase , headers=__lowercase , allow_redirects=__lowercase )
_A = result.headers["Location"]
_A = requests.get(__lowercase , allow_redirects=__lowercase )
_A = os.path.join(__lowercase , F'''{artifact_name}.zip''' )
with open(__lowercase , "wb" ) as fp:
fp.write(response.content )
def __lowercase ( __lowercase , __lowercase=None ) -> Dict:
'''simple docstring'''
_A = []
_A = []
_A = None
with zipfile.ZipFile(__lowercase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowercase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowercase ) as f:
for line in f:
_A = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A = line[: line.index(": " )]
_A = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
_A = line[len("FAILED " ) :]
failed_tests.append(__lowercase )
elif filename == "job_name.txt":
_A = line
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(__lowercase )} for `errors` '''
F'''and {len(__lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
_A = None
if job_name and job_links:
_A = job_links.get(__lowercase , __lowercase )
# A list with elements of the form (line of error, error, failed test)
_A = [x + [y] + [job_link] for x, y in zip(__lowercase , __lowercase )]
return result
def __lowercase ( __lowercase , __lowercase=None ) -> List[Any]:
'''simple docstring'''
_A = []
_A = [os.path.join(__lowercase , __lowercase ) for p in os.listdir(__lowercase ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowercase , job_links=__lowercase ) )
return errors
def __lowercase ( __lowercase , __lowercase=None ) -> List[Any]:
'''simple docstring'''
_A = Counter()
counter.update([x[1] for x in logs] )
_A = counter.most_common()
_A = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
_A = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
return r
def __lowercase ( __lowercase ) -> List[str]:
'''simple docstring'''
_A = test.split("::" )[0]
if test.startswith("tests/models/" ):
_A = test.split("/" )[2]
else:
_A = None
return test
def __lowercase ( __lowercase , __lowercase=None ) -> Dict:
'''simple docstring'''
_A = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A = [x for x in logs if x[2] is not None]
_A = {x[2] for x in logs}
_A = {}
for test in tests:
_A = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A = counter.most_common()
_A = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A = sum(error_counts.values() )
if n_errors > 0:
_A = {"count": n_errors, "errors": error_counts}
_A = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
return r
def __lowercase ( __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A = "| no. | error | status |"
_A = "|-:|:-|:-|"
_A = [header, sep]
for error in reduced_by_error:
_A = reduced_by_error[error]["count"]
_A = F'''| {count} | {error[:100]} | |'''
lines.append(__lowercase )
return "\n".join(__lowercase )
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
_A = "| model | no. of errors | major error | count |"
_A = "|-:|-:|-:|-:|"
_A = [header, sep]
for model in reduced_by_model:
_A = reduced_by_model[model]["count"]
_A , _A = list(reduced_by_model[model]["errors"].items() )[0]
_A = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(__lowercase )
return "\n".join(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
lowerCamelCase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCamelCase_ = get_job_links(args.workflow_run_id, token=args.token)
lowerCamelCase_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase_ = k.find(''' / ''')
lowerCamelCase_ = k[index + len(''' / ''') :]
lowerCamelCase_ = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCamelCase_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCamelCase_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCamelCase_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ = reduce_by_error(errors)
lowerCamelCase_ = reduce_by_model(errors)
lowerCamelCase_ = make_github_table(reduced_by_error)
lowerCamelCase_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
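# Hedged mini-reproduction of the error-reduction step above on toy data:
# count error messages across test logs and take the most common one.
from collections import Counter

toy_logs = [
    ('line a', 'AssertionError', 'tests/test_x.py::test_one'),
    ('line b', 'AssertionError', 'tests/test_y.py::test_two'),
    ('line c', 'KeyError', 'tests/test_x.py::test_three'),
]
toy_counter = Counter(error for _, error, _ in toy_logs)
assert toy_counter.most_common(1) == [('AssertionError', 2)]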
| 79 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_A = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def a__ ( lowerCAmelCase ) -> List[Any]:
UpperCAmelCase__ : Dict = _TestCommandArgs(dataset=lowerCAmelCase , all_configs=lowerCAmelCase , save_infos=lowerCAmelCase )
UpperCAmelCase__ : List[Any] = TestCommand(*lowerCAmelCase )
test_command.run()
UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase , """README.md""" )
assert os.path.exists(lowerCAmelCase )
UpperCAmelCase__ : List[str] = DatasetInfosDict.from_directory(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_35_15_63,
"""num_examples""": 1_00_00,
},
{
"""name""": """validation""",
"""num_bytes""": 23_84_18,
"""num_examples""": 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = getattr(dataset_infos["""default"""] , lowerCAmelCase ), getattr(expected_dataset_infos["""default"""] , lowerCAmelCase )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase , lowerCAmelCase )
elif key == "splits":
assert list(lowerCAmelCase ) == list(lowerCAmelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 171 | 0 |
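# The test above accepts byte counts that differ by less than one percent.
# A standalone sketch of that tolerance check on made-up sizes:
def is_percent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01

assert is_percent_close(2_351_563, 2_360_000)      # ~0.36% off -> close enough
assert not is_percent_close(2_351_563, 3_000_000)  # ~21.6% off -> too far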
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaForSequenceClassification.from_pretrained(lowerCAmelCase_ , config=lowerCAmelCase_ )
lowerCAmelCase__ = downstream_dict["projector.weight"]
lowerCAmelCase__ = downstream_dict["projector.bias"]
lowerCAmelCase__ = downstream_dict["model.post_net.linear.weight"]
lowerCAmelCase__ = downstream_dict["model.post_net.linear.bias"]
return model
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaForAudioFrameClassification.from_pretrained(lowerCAmelCase_ , config=lowerCAmelCase_ )
lowerCAmelCase__ = downstream_dict["model.linear.weight"]
lowerCAmelCase__ = downstream_dict["model.linear.bias"]
return model
def _A ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaForXVector.from_pretrained(lowerCAmelCase_ , config=lowerCAmelCase_ )
lowerCAmelCase__ = downstream_dict["connector.weight"]
lowerCAmelCase__ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowerCAmelCase__ = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
lowerCAmelCase__ = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
lowerCAmelCase__ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
lowerCAmelCase__ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
lowerCAmelCase__ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
lowerCAmelCase__ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
lowerCAmelCase__ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = torch.load(lowerCAmelCase_ , map_location="cpu" )
lowerCAmelCase__ = checkpoint["Downstream"]
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
lowerCAmelCase__ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
lowerCAmelCase__ = convert_classification(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
elif arch.endswith("ForAudioFrameClassification" ):
lowerCAmelCase__ = convert_diarization(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
elif arch.endswith("ForXVector" ):
lowerCAmelCase__ = convert_xvector(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
lowerCAmelCase__ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowerCAmelCase_ )
hf_model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 365 |
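# The converters above all share one move: tensors from a downstream
# checkpoint dict are copied onto the parameters of a freshly built head.
# A minimal plain-PyTorch sketch with hypothetical key names:
import torch
from torch import nn

downstream = {"projector.weight": torch.randn(8, 4), "projector.bias": torch.randn(8)}
projector = nn.Linear(4, 8)  # weight shape (8, 4) matches the checkpoint tensor
projector.weight.data = downstream["projector.weight"]
projector.bias.data = downstream["projector.bias"]
assert torch.equal(projector.weight.data, downstream["projector.weight"])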
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
UpperCamelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
UpperCamelCase = F"""down_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
UpperCamelCase = F"""down_blocks.{i}.attentions.{j}."""
UpperCamelCase = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
UpperCamelCase = F"""up_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
UpperCamelCase = F"""up_blocks.{i}.attentions.{j}."""
UpperCamelCase = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
UpperCamelCase = F"""down_blocks.{i}.downsamplers.0.conv."""
UpperCamelCase = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
UpperCamelCase = F"""up_blocks.{i}.upsamplers.0."""
UpperCamelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
UpperCamelCase = 'mid_block.attentions.0.'
UpperCamelCase = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
UpperCamelCase = F"""mid_block.resnets.{j}."""
UpperCamelCase = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _A ( lowerCAmelCase_ : Any ):
"""simple docstring"""
lowerCAmelCase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
lowerCAmelCase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = v
lowerCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
UpperCamelCase = F"""encoder.down_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
UpperCamelCase = F"""down_blocks.{i}.downsamplers.0."""
UpperCamelCase = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
UpperCamelCase = F"""up_blocks.{i}.upsamplers.0."""
UpperCamelCase = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
UpperCamelCase = F"""decoder.up_blocks.{i}.resnets.{j}."""
UpperCamelCase = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
UpperCamelCase = F"""mid_block.resnets.{i}."""
UpperCamelCase = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def _A ( lowerCAmelCase_ : List[str] ):
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def _A ( lowerCAmelCase_ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = v
lowerCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
lowerCAmelCase__ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F'mid.attn_1.{weight_name}.weight' in k:
print(F'Reshaping {k} for SD format' )
lowerCAmelCase__ = reshape_weight_for_sd(lowerCAmelCase_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
UpperCamelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
UpperCamelCase = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
UpperCamelCase = {'q': 0, 'k': 1, 'v': 2}
def _A ( lowerCAmelCase_ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
lowerCAmelCase__ = k[: -len(".q_proj.weight" )]
lowerCAmelCase__ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
lowerCAmelCase__ = [None, None, None]
lowerCAmelCase__ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
lowerCAmelCase__ = k[: -len(".q_proj.bias" )]
lowerCAmelCase__ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
lowerCAmelCase__ = [None, None, None]
lowerCAmelCase__ = v
continue
    lowerCAmelCase__ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
lowerCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        lowerCAmelCase__ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
lowerCAmelCase__ = torch.cat(lowerCAmelCase_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        lowerCAmelCase__ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
lowerCAmelCase__ = torch.cat(lowerCAmelCase_ )
return new_state_dict
def _A ( lowerCAmelCase_ : Any ):
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
UpperCamelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load weights from the safetensors files if they exist; otherwise fall back to the PyTorch .bin files
if osp.exists(unet_path):
UpperCamelCase = load_file(unet_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
UpperCamelCase = load_file(vae_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
UpperCamelCase = load_file(text_enc_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
UpperCamelCase = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
UpperCamelCase = convert_unet_state_dict(unet_state_dict)
UpperCamelCase = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase = convert_vae_state_dict(vae_state_dict)
UpperCamelCase = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCamelCase = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase = {'transformer.' + k: v for k, v in text_enc_dict.items()}
UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase = convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 221 | 0 |
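# Every conversion above follows the same two-step pattern: derive a
# key-to-key mapping by fragment replacement, then rebuild the state dict
# under the new names. A standalone sketch with invented keys and a
# two-entry (stable-diffusion, HF) map:
conversion_map = [("in_layers.0", "norm1"), ("out_layers.3", "conv2")]  # (sd, hf)

state_dict = {"block.norm1.weight": 1, "block.conv2.bias": 2}  # HF-style keys
mapping = {k: k for k in state_dict}
for k, v in mapping.items():
    for sd_part, hf_part in conversion_map:
        v = v.replace(hf_part, sd_part)  # rename HF fragments to SD fragments
    mapping[k] = v
new_state_dict = {v: state_dict[k] for k, v in mapping.items()}
print(new_state_dict)  # {'block.in_layers.0.weight': 1, 'block.out_layers.3.bias': 2}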
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case_ : Tuple = random.Random()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_=1.0 , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if rng is None:
_UpperCamelCase : Dict = global_rng
_UpperCamelCase : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase ):
def __init__( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=7 ,lowerCamelCase__ : str=400 ,lowerCamelCase__ : int=2000 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Union[str, Any]=16000 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[int]=True ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : List[str] = min_seq_length
_UpperCamelCase : Optional[int] = max_seq_length
_UpperCamelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCamelCase : List[str] = feature_size
_UpperCamelCase : List[str] = padding_value
_UpperCamelCase : List[Any] = sampling_rate
_UpperCamelCase : Dict = return_attention_mask
_UpperCamelCase : Tuple = do_normalize
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Tuple=False ):
'''simple docstring'''
def _flatten(lowerCamelCase__ : Optional[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_UpperCamelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_UpperCamelCase : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
_UpperCamelCase : int = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = WavaVecaFeatureExtractor
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = WavaVecaFeatureExtractionTester(self )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCamelCase__ ,axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ,axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase : int = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Tuple = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCamelCase : Tuple = feat_extract(speech_inputs[0] ,return_tensors='np' ).input_values
_UpperCamelCase : Any = feat_extract(np_speech_inputs[0] ,return_tensors='np' ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test batched
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : Optional[int] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCamelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCamelCase : str = np.asarray(lowerCamelCase__ )
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : int = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='np' )
_UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[str] = range(800 ,1400 ,200 )
_UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
_UpperCamelCase : Optional[Any] = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : str = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding=lowerCamelCase__ )
_UpperCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Union[str, Any] = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='max_length' ,return_tensors='np' )
_UpperCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : int = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Any = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=2000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
import torch
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
_UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase : Optional[int] = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCamelCase : Tuple = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCamelCase : Optional[int] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask ,config.feat_extract_norm == 'layer' )
| 83 |
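# The feature-extraction tests above assert zero-mean, unit-variance outputs.
# A hedged numpy sketch of that property on synthetic audio-like data:
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=3.0, scale=2.0, size=(1000,))
normalized = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(normalized.mean()) < 1e-3      # mean pulled to ~0
assert abs(normalized.var() - 1) < 1e-3   # variance scaled to ~1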
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowercase__ :
lowercase__ = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
lowercase__ = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
lowercase__ = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def A__ ( ):
_UpperCamelCase : Optional[Any] = HfArgumentParser((ModelArguments,) )
((_UpperCamelCase) , ) : Optional[int] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_UpperCamelCase : Any = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_UpperCamelCase : str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_UpperCamelCase : str = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=UpperCAmelCase_ , decoder_config=UpperCAmelCase_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_UpperCamelCase : str = decoder_config.decoder_start_token_id
_UpperCamelCase : Optional[int] = decoder_config.pad_token_id
if decoder_start_token_id is None:
_UpperCamelCase : int = decoder_config.bos_token_id
if pad_token_id is None:
_UpperCamelCase : Dict = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_UpperCamelCase : List[Any] = decoder_config.eos_token_id
_UpperCamelCase : Dict = decoder_start_token_id
_UpperCamelCase : int = pad_token_id
_UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 83 | 1 |
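# The token wiring above is a small fallback chain: prefer explicit
# decoder_start/pad ids and fall back to bos/eos, since GPT-2-style configs
# only define the latter. A sketch with a hypothetical config object:
class DummyConfig:
    decoder_start_token_id = None
    pad_token_id = None
    bos_token_id = 50256
    eos_token_id = 50256

cfg = DummyConfig()
decoder_start = cfg.decoder_start_token_id if cfg.decoder_start_token_id is not None else cfg.bos_token_id
pad = cfg.pad_token_id if cfg.pad_token_id is not None else cfg.eos_token_id
print(decoder_start, pad)  # 50256 50256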
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase ) ->List[int]:
"""simple docstring"""
if isinstance(UpperCAmelCase , np.ndarray ):
return list(tensor.shape )
A = tf.shape(UpperCAmelCase )
    if tensor.shape == tf.TensorShape(None ):
return dynamic
A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )]
def __a ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase=-1 ) ->str:
"""simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
A , A = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A = [1] * inputs.shape.rank
A = shape_list(UpperCAmelCase )[axis]
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
A = tf.nn.batch_normalization(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , )
return outputs
def __a ( UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=-1 ) ->int:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A = tf.shape(UpperCAmelCase )
A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(UpperCAmelCase , tf.Tensor ):
A = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
A = np.asarray(UpperCAmelCase )
A = 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase ):
A = chunk_data
else:
A = data
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if name in group.attrs:
A = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]]
else:
A = []
A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
| 337 |
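# The shape helper above mixes static and dynamic dimensions so downstream
# code works in graph mode: known dims stay as Python ints, unknown (None)
# dims fall back to the dynamic tf.shape tensor. A compact sketch of that
# trick (assumes TensorFlow is installed):
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])
def describe(t):
    dynamic = tf.shape(t)                 # dynamic shape tensor
    static = t.shape.as_list()            # [None, 4] at trace time
    mixed = [dynamic[i] if s is None else s for i, s in enumerate(static)]
    return mixed[0] * mixed[1]            # dynamic batch * static feature dim

print(describe(tf.zeros((3, 4))))         # tf.Tensor(12, ...)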
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
return [ord(UpperCAmelCase ) - 96 for elem in plain]
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def __a ( ) ->None:
"""simple docstring"""
A = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , UpperCAmelCase )
print("""Decoded:""" , decode(UpperCAmelCase ) )
if __name__ == "__main__":
main()
| 337 | 1 |
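# Round-trip usage of the a1z26 mapping above, assuming lowercase ASCII
# plaintext ('a' -> 1, ..., 'z' -> 26):
encoded = [ord(c) - 96 for c in "hello"]
assert encoded == [8, 5, 12, 12, 15]
assert "".join(chr(n + 96) for n in encoded) == "hello"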
"""simple docstring"""
import math
class a :
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ):
_UpperCAmelCase = 0.0
_UpperCAmelCase = 0.0
for i in range(len(__lowerCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
            return 0 if da > db else 1
return 0
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ):
for i in range(len(__lowerCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __UpperCAmelCase ( ):
"""simple docstring"""
# Training Examples ( m, n )
_UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCAmelCase = SelfOrganizingMap()
_UpperCAmelCase = 3
_UpperCAmelCase = 0.5
for _ in range(lowercase ):
for j in range(len(lowercase ) ):
# training sample
_UpperCAmelCase = training_samples[j]
# Compute the winning vector
_UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase )
# Update the winning vector
_UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase )
# classify test sample
_UpperCAmelCase = [0, 0, 0, 1]
_UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase )
# results
print(f'''Clusters that the test sample belongs to : {winner}''' )
print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
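# A standalone sketch of one SOM training step: pick the centroid closest to
# the sample by squared Euclidean distance, then pull it toward the sample by
# a fraction alpha. This sketch uses the conventional closest-wins rule; the
# get_winner above keeps the upstream implementation's own quirky variant.
sample = [1, 1, 0, 0]
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
alpha = 0.5

d0 = sum((s - w) ** 2 for s, w in zip(sample, weights[0]))
d1 = sum((s - w) ** 2 for s, w in zip(sample, weights[1]))
winner = 0 if d0 < d1 else 1
weights[winner] = [w + alpha * (s - w) for s, w in zip(sample, weights[winner])]
print(winner, weights[winner])  # 1 [0.9, 0.7, 0.35, 0.15]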
| 289 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ):
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_flip_channel_order
def lowerCAmelCase_ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 289 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowerCamelCase = tokenizer('Hello there' , return_tensors='np' ).input_ids
__lowerCamelCase = tokenizer('Hi I am' , return_tensors='np' ).input_ids
__lowerCamelCase = shift_tokens_right(lowerCamelCase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
__lowerCamelCase = model(lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits
__lowerCamelCase = optax.softmax_cross_entropy(lowerCamelCase__ , onehot(lowerCamelCase__ , logits.shape[-1] ) ).mean()
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 348 |
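# The score in the test above is the mean token cross entropy scaled by
# -sequence_length. A numpy re-derivation on made-up logits and labels:
import numpy as np

logits = np.array([[[2.0, 0.5, -1.0], [0.1, 1.5, 0.3]]])  # (batch, seq, vocab)
labels = np.array([[0, 1]])                                # (batch, seq)

log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
token_loss = -np.take_along_axis(log_probs, labels[..., None], axis=-1).squeeze(-1)
score = -(labels.shape[-1] * token_loss.mean())
print(score)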
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float] , UpperCamelCase__ : list[float] ) -> float:
"""simple docstring"""
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase , __lowerCamelCase = divmod(len(UpperCamelCase__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = [float(x) for x in input("Enter the elements of first array: ").split()]
__A = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 348 | 1 |
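# Worked example of the merged-median computation above: the combined sorted
# sequence of [1.0, 3.0] and [2.0, 4.0] has even length, so the median is the
# mean of the two middle elements.
merged = sorted([1.0, 3.0] + [2.0, 4.0])        # [1.0, 2.0, 3.0, 4.0]
div, mod = divmod(len(merged), 2)
median = merged[div] if mod == 1 else (merged[div] + merged[div - 1]) / 2
assert median == 2.5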
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : str ="swin"
a : Union[str, Any] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=224 , snake_case__=4 , snake_case__=3 , snake_case__=96 , snake_case__=[2, 2, 6, 2] , snake_case__=[3, 6, 12, 24] , snake_case__=7 , snake_case__=4.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=False , snake_case__=0.02 , snake_case__=1e-5 , snake_case__=32 , snake_case__=None , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Any = image_size
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : List[Any] = num_channels
lowerCAmelCase : Tuple = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(snake_case__ )
lowerCAmelCase : List[str] = num_heads
lowerCAmelCase : Union[str, Any] = window_size
lowerCAmelCase : Optional[Any] = mlp_ratio
lowerCAmelCase : List[str] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[int] = use_absolute_embeddings
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : int = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
lowerCAmelCase : Tuple = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(snake_case__ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : List[Any] = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
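# The derived hidden_size in the config above doubles the embedding dimension
# once per stage; for the defaults (embed_dim=96, four depths) that yields 768:
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768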
| 108 |
import numpy as np
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1e-12 , SCREAMING_SNAKE_CASE = 100 , ):
'''simple docstring'''
assert np.shape(SCREAMING_SNAKE_CASE )[0] == np.shape(SCREAMING_SNAKE_CASE )[1]
# Ensure proper dimensionality.
assert np.shape(SCREAMING_SNAKE_CASE )[0] == np.shape(SCREAMING_SNAKE_CASE )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(SCREAMING_SNAKE_CASE ) == np.iscomplexobj(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = np.iscomplexobj(SCREAMING_SNAKE_CASE )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(SCREAMING_SNAKE_CASE , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__UpperCamelCase :str = False
__UpperCamelCase :int = 0
__UpperCamelCase :Optional[Any] = 0
__UpperCamelCase :Union[str, Any] = 1e12
while not convergence:
# Multiple matrix by the vector.
__UpperCamelCase :List[str] = np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Normalize the resulting output vector.
__UpperCamelCase :Tuple = w / np.linalg.norm(SCREAMING_SNAKE_CASE )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__UpperCamelCase :int = vector.conj().T if is_complex else vector.T
__UpperCamelCase :Optional[int] = np.dot(SCREAMING_SNAKE_CASE , np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# Check convergence.
__UpperCamelCase :Optional[Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__UpperCamelCase :Dict = True
__UpperCamelCase :List[Any] = lambda_
if is_complex:
__UpperCamelCase :Tuple = np.real(lambda_ )
return lambda_, vector
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__UpperCamelCase :Optional[Any] = np.array([41, 4, 20] )
__UpperCamelCase :Any = real_input_matrix.astype(np.complexaaa )
__UpperCamelCase :Dict = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__UpperCamelCase :Optional[int] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__UpperCamelCase :Any = real_input_matrix
__UpperCamelCase :int = real_vector
elif problem_type == "complex":
__UpperCamelCase :Tuple = complex_input_matrix
__UpperCamelCase :Optional[Any] = complex_vector
# Our implementation.
__UpperCamelCase , __UpperCamelCase :Dict = power_iteration(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__UpperCamelCase , __UpperCamelCase :List[Any] = np.linalg.eigh(SCREAMING_SNAKE_CASE )
# Last eigenvalue is the maximum one.
__UpperCamelCase :List[Any] = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__UpperCamelCase :str = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(SCREAMING_SNAKE_CASE ) - np.abs(SCREAMING_SNAKE_CASE ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 43 | 0 |
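# A compact standalone run of the power-iteration idea above on a small
# symmetric matrix: repeated multiply-and-normalize converges to the dominant
# eigenpair, recovered via the Rayleigh quotient.
import numpy as np

a = np.array([[2.0, 0.0], [0.0, 1.0]])
v = np.array([1.0, 1.0])
for _ in range(50):
    v = a @ v
    v = v / np.linalg.norm(v)
eigenvalue = v @ a @ v  # Rayleigh quotient of the normalized vector
assert abs(eigenvalue - 2.0) < 1e-6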
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_A = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_A = [ord(letter) for letter in string.ascii_lowercase]
_A = {ord(char) for char in VALID_CHARS}
_A = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = ''
lowerCamelCase : int = 42
lowerCamelCase : List[Any] = 42
lowerCamelCase : List[Any] = 42
for keychar, cipherchar in zip(cycle(_UpperCamelCase ), _UpperCamelCase ):
lowerCamelCase : int = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_UpperCamelCase )
return decoded
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = []
for key in product(_UpperCamelCase, repeat=3 ):
lowerCamelCase : str = try_key(_UpperCamelCase, _UpperCamelCase )
if encoded is not None:
possibles.append(_UpperCamelCase )
return possibles
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase ( a_ = "p059_cipher.txt" ):
'''simple docstring'''
lowerCamelCase : List[str] = 42
lowerCamelCase : Any = 42
lowerCamelCase : str = 42
lowerCamelCase : Union[str, Any] = 42
lowerCamelCase : List[Any] = Path(_UpperCamelCase ).parent.joinpath(_UpperCamelCase ).read_text(encoding='utf-8' )
lowerCamelCase : str = [int(_UpperCamelCase ) for number in data.strip().split(',' )]
lowerCamelCase : List[str] = filter_valid_chars(_UpperCamelCase )
for common_word in COMMON_WORDS:
lowerCamelCase : Optional[int] = filter_common_word(_UpperCamelCase, _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
break
lowerCamelCase : Optional[Any] = possibles[0]
return sum(ord(_UpperCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359 |
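# A round-trip sketch of the XOR scheme the search above brute-forces:
# XOR with a cycled three-letter key is its own inverse.
from itertools import cycle

key, message = "abc", "the quick brown fox"
cipher = [ord(m) ^ ord(k) for m, k in zip(message, cycle(key))]
decoded = "".join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle(key)))
assert decoded == message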
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
| 205 | 0 |
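A minimal usage sketch for the processor above; the checkpoint name and example text are illustrative assumptions, not taken from the snippet itself.
# Hypothetical usage of ChineseCLIPProcessor; checkpoint name is an assumption.
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))  # stand-in image
inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
# The combined call returns tokenizer fields plus pixel_values from the image processor.
print(sorted(inputs.keys()))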
'''simple docstring'''
def reverse_words(input_str):
    """simple docstring"""
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)
    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = F"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = F"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            """simple docstring"""
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 359 |
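A single-process sketch of the ordering check that the script above performs under torch.distributed; it assumes the DummyDataset, DummyDataCollator, and DummyModel classes defined above are in scope, and the output directory is a placeholder.
# Minimal local sketch, not part of the original test file.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(output_dir="/tmp/out", report_to=[])  # placeholder dir
dataset = DummyDataset(7)

def compute_metrics(p):
    sequential = list(range(len(dataset)))
    return {"success": p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential}

trainer = Trainer(model=DummyModel(), args=args, data_collator=DummyDataCollator(),
                  eval_dataset=dataset, compute_metrics=compute_metrics)
print(trainer.evaluate())  # expect eval_success == True on a single process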
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 62 | 0 |
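A few illustrative conversions with the function above; these values follow directly from the digit loop and the ALPHABET_VALUES table.
# Usage examples for decimal_to_any from the snippet above.
assert decimal_to_any(0, 2) == "0"
assert decimal_to_any(5, 4) == "11"      # 5 = 1*4 + 1
assert decimal_to_any(255, 16) == "FF"   # digits above 9 come from ALPHABET_VALUES
assert int(decimal_to_any(1000, 36), 36) == 1000  # round-trips via int(), as in the doctest loop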
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'decord')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 86 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''instructblip_vision_model'''
    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''instructblip_qformer'''
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''instructblip'''
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs, ):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 145 | 0 |
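A short composition sketch for the configs above; the sub-config constructors and from_vision_qformer_text_configs mirror the snippet, while the specific sizes here are illustrative defaults.
# Hedged example: composing an InstructBlipConfig from its three sub-configs.
from transformers import InstructBlipConfig, InstructBlipVisionConfig, InstructBlipQFormerConfig, OPTConfig

vision = InstructBlipVisionConfig(hidden_size=1408)
qformer = InstructBlipQFormerConfig(hidden_size=768)
text = OPTConfig()
config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
print(config.num_query_tokens)  # defaults to 32 in the snippet above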
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    """simple docstring"""
    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model
    @property
    def dummy_vq_model(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=3, )
        return model
    @property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='''numpy''').images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='''numpy''', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != '''mps''' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def test_inference_uncond(self):
        """simple docstring"""
        ldm = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != '''mps''' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 83 |
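A minimal unconditional-generation sketch with LDMPipeline, using the same CompVis/ldm-celebahq-256 checkpoint as the slow test above; not part of the original test file.
import torch
from diffusers import LDMPipeline

ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.manual_seed(0)
image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images[0]
print(image.shape)  # (256, 256, 3) for this checkpoint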
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 83 | 1 |
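An illustrative programmatic use of the converter imported above, with placeholder paths; the keyword arguments are the ones the script itself passes.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.ckpt",  # placeholder path
    scheduler_type="pndm",
    extract_ema=True,
)
pipe.save_pretrained("converted-sd")  # same save path the script takes without --controlnet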
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """simple docstring"""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 169 |
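A quick example of the sentence splitter above; the exact split depends on the downloaded punkt model.
text = "PEGASUS was pre-trained on C4.<n>It uses gap-sentence generation."
print(add_newline_to_end_of_each_sentence(text))
# Expected: the two sentences on separate lines, with the <n> marker removed.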
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.''')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(F'''Loading tokenizer classes: {tokenizer_names}''')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''')
        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F'''=> File names {file_names}''')
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(F'''=> removing {file_name}''')
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
_lowerCAmelCase : str = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 169 | 1 |
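A hedged example of one direct call to the converter above; the dump path is a placeholder.
convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",
    checkpoint_name="bert-base-uncased",
    dump_path="./fast-tokenizers",  # placeholder output directory
    force_download=False,
)
# Only the generated tokenizer.json files are kept; other files are removed by the loop above.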
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """simple docstring"""
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)
    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
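A quick illustration of the pair extraction that drives bpe() above; note that set printing order may vary.
# get_pairs on a word split into symbols, with the </w> end-of-word marker.
print(get_pairs(("l", "o", "w", "e", "r</w>")))
# e.g. {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}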
| 50 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def __lowercase ( lowerCamelCase : Optional[int] ):
UpperCamelCase_ : Dict = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
UpperCamelCase_ : Tuple = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
UpperCamelCase_ : List[Any] = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase_ : Optional[Any] = key[key.find('patch_embed' ) + len('patch_embed' )]
UpperCamelCase_ : List[str] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(lowerCamelCase )-1}" )
if "norm" in key:
UpperCamelCase_ : int = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase_ : int = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
UpperCamelCase_ : int = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(lowerCamelCase )-1}" )
if "layer_norm1" in key:
UpperCamelCase_ : Optional[int] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
UpperCamelCase_ : str = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase_ : Union[str, Any] = key[key.find('block' ) + len('block' )]
UpperCamelCase_ : Dict = key.replace(F"block{idx}" , F"block.{int(lowerCamelCase )-1}" )
if "attn.q" in key:
UpperCamelCase_ : Optional[Any] = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
UpperCamelCase_ : Any = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
UpperCamelCase_ : Optional[int] = key.replace('attn' , 'attention.self' )
if "fc1" in key:
UpperCamelCase_ : int = key.replace('fc1' , 'dense1' )
if "fc2" in key:
UpperCamelCase_ : Optional[int] = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
UpperCamelCase_ : Tuple = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
UpperCamelCase_ : Union[str, Any] = key.replace('linear_fuse.conv' , 'linear_fuse' )
UpperCamelCase_ : Tuple = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase_ : Optional[Any] = key[key.find('linear_c' ) + len('linear_c' )]
UpperCamelCase_ : List[str] = key.replace(F"linear_c{idx}" , F"linear_c.{int(lowerCamelCase )-1}" )
if "bot_conv" in key:
UpperCamelCase_ : Union[str, Any] = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
UpperCamelCase_ : Optional[int] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
UpperCamelCase_ : List[Any] = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
UpperCamelCase_ : Tuple = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
UpperCamelCase_ : Any = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
UpperCamelCase_ : int = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
UpperCamelCase_ : Optional[Any] = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
UpperCamelCase_ : str = key.replace('module.last_layer_depth' , 'head.head' )
UpperCamelCase_ : Optional[Any] = value
return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
UpperCamelCase_ : List[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCamelCase_ : Tuple = GLPNImageProcessor()
# prepare image
UpperCamelCase_ : List[Any] = prepare_img()
UpperCamelCase_ : str = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
UpperCamelCase_ : Any = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
UpperCamelCase_ : str = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
UpperCamelCase_ : Dict = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
UpperCamelCase_ : Optional[Any] = model(lowerCamelCase )
UpperCamelCase_ : str = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCamelCase_ : Tuple = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
UpperCamelCase_ : Any = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
UpperCamelCase_ : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 50 | 1 |
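A hedged end-to-end depth-estimation sketch related to the converter above; the vinvino02/glpn-kitti checkpoint name is an assumption and not taken from the script.
import torch
from transformers import GLPNForDepthEstimation, GLPNImageProcessor

processor = GLPNImageProcessor()
model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
pixel_values = processor(images=prepare_img(), return_tensors="pt").pixel_values
with torch.no_grad():
    depth = model(pixel_values).predicted_depth
print(depth.shape)  # (1, H, W) depth map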
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features):
    batch_size = np.inf
    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )
    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs, ):
        """simple docstring"""
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self):
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj, batch_size, **parquet_writer_kwargs):
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 92 |
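A small round-trip sketch using the reader and writer above; the file name is a placeholder.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
written = ParquetDatasetWriter(ds, "tmp.parquet").write()
print(written > 0)  # bytes written to tmp.parquet
reloaded = ParquetDatasetReader("tmp.parquet").read()
print(reloaded.column_names)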
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    '''simple docstring'''
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""")
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
a : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
a : Optional[int] = 8
a : Dict = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
a : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/moe" ):
a : List[str] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[str] = torch.tensor(_lowercase )
elif key_name.endswith("/softmlp/kernel" ):
a : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
a : Any = key_name[-9:-7]
for i in range(16 ):
a : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
a : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/mlp" ):
a : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
a : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.endswith("/p1/bias" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
a : List[Any] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.endswith("/p2/kernel" ):
a : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/p2/bias" ):
a : Dict = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : str = torch.tensor(_lowercase )
elif key_name.startswith("model/ln" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.bias" % player
a : Tuple = vnp.copy() # same because it is one dimensional
a : int = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.weight" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/att" ):
a : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
a : Union[str, Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
a : List[str] = state[:, 0, :, :]
a : Dict = state[:, 1, :, :]
a : Union[str, Any] = state[:, 2, :, :]
a : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Dict = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
a : Union[str, Any] = torch.tensor(_lowercase )
a : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
a : List[str] = torch.tensor(_lowercase )
a : Optional[Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
a : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/o/kernel" ):
a : Any = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
a : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/an" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[int] = "model.blocks.%d.self_attn.norm.bias" % player
a : Union[str, Any] = vnp.copy() # same because it is one dimensional
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Any = "model.blocks.%d.self_attn.norm.weight" % player
a : str = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
a : Optional[int] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
a : Tuple = "model.%s.weight" % nlayer
a : Any = vnp.copy() # same in embedded
a : Tuple = torch.tensor(_lowercase )
if key_name.startswith("model/wte" ):
a : Optional[int] = "lm_head.weight"
a : Optional[int] = vnp.copy() # same in embedded
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("model/wob" ):
a : Optional[int] = "final_logits_bias"
a : Optional[Any] = vnp.copy() # same in embedded
a : Optional[int] = state.reshape((1, -1) )
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
a : Optional[int] = "model.last_project.weight"
a : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
a : Dict = "model.last_project.bias"
a : Optional[Any] = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 105 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    '''simple docstring'''
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tmp_path / "cache"
_lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : List[Any] = features.copy() if features else default_expected_features
_lowerCAmelCase : Dict = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase : str = ParquetDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = tmp_path / "cache"
_lowerCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Dict = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : int = parquet_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Any = [parquet_path]
_lowerCAmelCase : Dict = tmp_path / "cache"
_lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Any = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
_lowerCAmelCase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = tmp_path / "cache"
_lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase : List[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tmp_path / "cache"
_lowerCAmelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
_lowerCAmelCase : Any = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase : Any = ParquetDatasetReader({"train": parquet_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if split:
_lowerCAmelCase : Dict = {split: parquet_path}
else:
_lowerCAmelCase : Tuple = "train"
_lowerCAmelCase : List[str] = {"train": parquet_path, "test": parquet_path}
_lowerCAmelCase : Any = tmp_path / "cache"
_lowerCAmelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Tuple = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_lowerCAmelCase : str = pq.ParquetFile(tmp_path / "foo.parquet" )
_lowerCAmelCase : List[Any] = pf.read()
assert dataset.data.table == output_table
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = str(shared_datadir / "test_image_rgb.jpg" )
_lowerCAmelCase : Union[str, Any] = {"image": [image_path]}
_lowerCAmelCase : List[Any] = Features({"image": Image()} )
_lowerCAmelCase : Dict = Dataset.from_dict(_lowerCamelCase , features=_lowerCamelCase )
_lowerCAmelCase : str = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_lowerCAmelCase : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
_lowerCAmelCase : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert get_writer_batch_size(_lowerCamelCase ) == expected
| 300 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a_ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(a_ , '''depth_multiplier''' ) )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : List[str] , a_ : int , a_ : List[str]=13 , a_ : str=3 , a_ : Any=32 , a_ : Any=0.2_5 , a_ : Optional[int]=8 , a_ : List[str]=True , a_ : Tuple=10_24 , a_ : Union[str, Any]=32 , a_ : Dict="relu6" , a_ : List[str]=0.1 , a_ : str=0.0_2 , a_ : Optional[int]=True , a_ : Dict=True , a_ : Union[str, Any]=10 , a_ : str=None , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Any = num_channels
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : List[str] = depth_multiplier
__UpperCAmelCase : Tuple = min_depth
__UpperCAmelCase : List[Any] = tf_padding
__UpperCAmelCase : Any = int(last_hidden_size * depth_multiplier )
__UpperCAmelCase : Optional[Any] = output_stride
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = classifier_dropout_prob
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Optional[Any] = scope
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case__ ( self : List[str] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case__ ( self : int , a_ : Optional[int] , a_ : Union[str, Any] , a_ : str , a_ : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = MobileNetVaModel(config=a_ )
model.to(a_ )
model.eval()
__UpperCAmelCase : Any = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case__ ( self : Union[str, Any] , a_ : List[str] , a_ : List[str] , a_ : str , a_ : Any ):
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : int = MobileNetVaForImageClassification(a_ )
model.to(a_ )
model.eval()
__UpperCAmelCase : Any = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Any = MobileNetVaModelTester(self )
__UpperCAmelCase : Tuple = MobileNetVaConfigTester(self , config_class=a_ , has_text_modality=a_ )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def snake_case__ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(a_ )
__UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a_ )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(a_ : Optional[Any] , a_ : Dict , a_ : Optional[int] ):
__UpperCAmelCase : Any = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(a_ , a_ ) )
__UpperCAmelCase : Optional[Any] = outputs.hidden_states
__UpperCAmelCase : str = 26
self.assertEqual(len(a_ ) , a_ )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Dict = True
check_hidden_states_output(a_ , a_ , a_ )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = MobileNetVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def a ( ):
'''simple docstring'''
__UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self : List[str] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(a_ )
__UpperCAmelCase : List[Any] = self.default_image_processor
__UpperCAmelCase : List[Any] = prepare_img()
__UpperCAmelCase : Optional[Any] = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Dict = model(**a_ )
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , a_ )
__UpperCAmelCase : str = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
| 226 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__A =""
__A =""
__A =""
__A =1 # (0 is vertical, 1 is horizontal)
def a ( ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_dataset(_UpperCAmelCase , _UpperCAmelCase )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = update_image_and_anno(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for index, image in enumerate(_UpperCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase : Any = random_chars(32 )
__UpperCAmelCase : List[str] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase : Optional[Any] = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(f'/{file_root}.jpg' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Success {index+1}/{len(_UpperCAmelCase )} with {file_name}' )
__UpperCAmelCase : Optional[Any] = []
for anno in new_annos[index]:
__UpperCAmelCase : Union[str, Any] = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(_UpperCAmelCase )
with open(f'/{file_root}.txt' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( _UpperCAmelCase : str , _UpperCAmelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = []
for label_file in glob.glob(os.path.join(_UpperCAmelCase , '''*.txt''' ) ):
__UpperCAmelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_UpperCAmelCase ) as in_file:
__UpperCAmelCase : List[str] = in_file.readlines()
__UpperCAmelCase : Optional[Any] = os.path.join(_UpperCAmelCase , f'{label_name}.jpg' )
__UpperCAmelCase : str = []
for obj_list in obj_lists:
__UpperCAmelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_UpperCAmelCase )
labels.append(_UpperCAmelCase )
return img_paths, labels
def a ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int = 1 ):
'''simple docstring'''
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Any = []
for idx in range(len(_UpperCAmelCase ) ):
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : List[Any] = img_list[idx]
path_list.append(_UpperCAmelCase )
__UpperCAmelCase : str = anno_list[idx]
__UpperCAmelCase : str = cva.imread(_UpperCAmelCase )
if flip_type == 1:
__UpperCAmelCase : Any = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__UpperCAmelCase : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase : Any = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__UpperCAmelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_UpperCAmelCase )
new_imgs_list.append(_UpperCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def a ( _UpperCAmelCase : int = 32 ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
__UpperCAmelCase : Union[str, Any] = ascii_lowercase + digits
return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 226 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : Union[str, Any] , __lowerCamelCase : Callable , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[dict] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : str , ):
super().__init__(
features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase :Optional[int] = Generator(
cache_dir=__lowerCamelCase , features=__lowerCamelCase , generator=__lowerCamelCase , gen_kwargs=__lowerCamelCase , **__lowerCamelCase , )
def _A ( self : List[str] ):
# Build iterable dataset
if self.streaming:
UpperCamelCase :int = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
UpperCamelCase :int = None
UpperCamelCase :List[str] = None
UpperCamelCase :List[str] = None
UpperCamelCase :str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
UpperCamelCase :Optional[int] = self.builder.as_dataset(
split="""train""" , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
| 62 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : List[Any] = DebertaVaTokenizer
snake_case__ : Any = DebertaVaTokenizerFast
snake_case__ : Union[str, Any] = True
snake_case__ : Tuple = True
def _A ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self : int , __lowerCamelCase : Union[str, Any] ):
UpperCamelCase :str = """this is a test"""
UpperCamelCase :Dict = """this is a test"""
return input_text, output_text
def _A ( self : Tuple ):
UpperCamelCase :Optional[Any] = """<pad>"""
UpperCamelCase :Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(__lowerCamelCase ) , 30_001 )
def _A ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def _A ( self : str ):
# fmt: off
UpperCamelCase :Optional[int] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Any = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
UpperCamelCase :Optional[Any] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
UpperCamelCase :Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Dict ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Optional[Any] ):
pass
def _A ( self : Optional[int] ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Any ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : str ):
# fmt: off
UpperCamelCase :List[str] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :List[str] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
# fmt: off
UpperCamelCase :Optional[Any] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Dict = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :int = self.get_tokenizer()
UpperCamelCase :str = self.get_rust_tokenizer()
UpperCamelCase :Dict = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = self.get_rust_tokenizer()
UpperCamelCase :Tuple = tokenizer.encode(__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Optional[int] = """This is a test"""
UpperCamelCase :str = [13, 1, 4_398, 25, 21, 1_289]
UpperCamelCase :int = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :Any = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[Any] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# fmt: off
UpperCamelCase :Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
UpperCamelCase :Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCamelCase :Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[int] ):
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.encode("""sequence builders""" )
UpperCamelCase :Any = tokenizer.encode("""multi-sequence build""" )
UpperCamelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
UpperCamelCase :str = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
@slow
def _A ( self : List[Any] ):
# fmt: off
UpperCamelCase :Union[str, Any] = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 62 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DonutImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
| 69 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCAmelCase : List[str] = 16
_UpperCAmelCase : Any = 32
def A ( lowercase , lowercase = 16 , lowercase = "bert-base-cased" ) -> Any:
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase = datasets.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config['''lr''']
UpperCamelCase = int(config['num_epochs'] )
UpperCamelCase = int(config['seed'] )
UpperCamelCase = int(config['batch_size'] )
UpperCamelCase = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCamelCase = 1
UpperCamelCase = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE__ , )
else:
UpperCamelCase = DummyScheduler(SCREAMING_SNAKE_CASE__ , total_num_steps=SCREAMING_SNAKE_CASE__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase = 0
# Now we train the model
UpperCamelCase = evaluate.load('glue' , 'mrpc' )
UpperCamelCase = 0
UpperCamelCase = {}
for epoch in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = outputs.loss
UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCamelCase = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
UpperCamelCase = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE__ , )
parser.add_argument(
'--output_dir' , type=SCREAMING_SNAKE_CASE__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 , help='Number of train epochs.' , )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 367 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' )
return image
def A ( lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def A ( lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = dct.pop(lowercase )
UpperCamelCase = val
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCamelCase = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
UpperCamelCase = qkv_bias
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 364 if 'coco' in model_name else 224
UpperCamelCase = BlipaVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
UpperCamelCase = BlipaConfig(vision_config=lowercase , text_config=lowercase )
return config, image_size
@torch.no_grad()
def A ( lowercase , lowercase=None , lowercase=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
UpperCamelCase = tokenizer('\n' , add_special_tokens=lowercase ).input_ids[0]
UpperCamelCase , UpperCamelCase = get_blipa_config(lowercase , eos_token_id=lowercase )
UpperCamelCase = BlipaForConditionalGeneration(lowercase ).eval()
UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCamelCase , UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase , UpperCamelCase , UpperCamelCase = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCamelCase = original_model.state_dict()
UpperCamelCase = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase = state_dict.pop(lowercase )
if key.startswith('Qformer.bert' ):
UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCamelCase = key.replace('t5' , 'language' )
UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
UpperCamelCase , UpperCamelCase = hf_model.load_state_dict(lowercase , strict=lowercase )
assert len(lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase = load_demo_image()
UpperCamelCase = vis_processors['eval'](lowercase ).unsqueeze(0 ).to(lowercase )
UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowercase )
# create processor
UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowercase , image_std=lowercase )
UpperCamelCase = BlipaProcessor(image_processor=lowercase , tokenizer=lowercase )
UpperCamelCase = processor(images=lowercase , return_tensors='pt' ).pixel_values.to(lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCamelCase = hf_model(lowercase , lowercase ).logits
else:
UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase = hf_model(lowercase , lowercase , labels=lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowercase )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowercase )
else:
# cast to same type
UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(lowercase ) , lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCamelCase = ''
UpperCamelCase = tokenizer(lowercase , return_tensors='pt' ).input_ids.to(lowercase )
UpperCamelCase = original_model.generate({'image': original_pixel_values} )
UpperCamelCase = hf_model.generate(
lowercase , lowercase , do_sample=lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowercase )
UpperCamelCase = input_ids.shape[1]
UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase )
UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
_UpperCAmelCase : str = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : str =(IPNDMScheduler,)
a : int =(("num_inference_steps", 50),)
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = {"num_train_timesteps": 1_000}
config.update(**snake_case__ )
return config
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = dict(self.forward_default_kwargs )
lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : Optional[int] = self.dummy_sample
lowerCAmelCase : Dict = 0.1 * sample
lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : List[Any] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : List[str] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : str = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : List[Any] = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : Union[str, Any] = dummy_past_residuals[:]
lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : List[str] = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : List[Any] = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = dict(self.forward_default_kwargs )
lowerCAmelCase : List[Any] = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : Optional[Any] = 0.1 * sample
lowerCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Any = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : Tuple = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : Dict = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : List[str] = dummy_past_residuals[:]
lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : str = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase : List[str] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Any = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : List[str] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
lowerCAmelCase : int = 10
lowerCAmelCase : List[Any] = self.dummy_model()
lowerCAmelCase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
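        # run the loop a second time, continuing from the previous sample (the multi-step scheduler keeps a history of residuals)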
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Tuple = model(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = dict(self.forward_default_kwargs )
lowerCAmelCase : str = kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
lowerCAmelCase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase : Union[str, Any] = dummy_past_residuals[:]
lowerCAmelCase : Union[str, Any] = scheduler.timesteps[5]
lowerCAmelCase : List[Any] = scheduler.timesteps[6]
lowerCAmelCase : Optional[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : int = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ , time_step=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=snake_case__ , time_step=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.full_loop()
lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 108 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ = "" , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase : str = is_leaf
lowerCAmelCase : str = prefix
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = 0
for q, w in zip(self.prefix , snake_case__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for word in words:
self.insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.prefix == word:
lowerCAmelCase : Union[str, Any] = True
        # Case 2: The node has no edge that shares a prefix with the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase : Optional[Any] = RadixNode(prefix=snake_case__ , is_leaf=snake_case__ )
else:
lowerCAmelCase : Tuple = self.nodes[word[0]]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = incoming_node.match(
snake_case__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(snake_case__ )
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase : Optional[Any] = remaining_prefix
lowerCAmelCase : int = self.nodes[matching_string[0]]
lowerCAmelCase : List[Any] = RadixNode(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[int] = aux_node
if remaining_word == "":
lowerCAmelCase : Optional[int] = True
else:
self.nodes[matching_string[0]].insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(snake_case__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase : List[str] = list(self.nodes.values() )[0]
lowerCAmelCase : List[str] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase : Optional[int] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase : Optional[Any] = list(incoming_node.nodes.values() )[0]
lowerCAmelCase : int = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase : Tuple = merging_node.nodes
return True
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = "banana bananas bandana band apple all beast".split()
lowerCAmelCase : List[str] = RadixNode()
root.insert_many(SCREAMING_SNAKE_CASE )
assert all(root.find(SCREAMING_SNAKE_CASE ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def a__ ( ):
'''simple docstring'''
assert test_trie()
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = RadixNode()
lowerCAmelCase : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(SCREAMING_SNAKE_CASE )
print("Words:" , SCREAMING_SNAKE_CASE )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 108 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case : List[Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case : Any = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case : List[Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case : str = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case : Optional[int] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case : Optional[int] = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case : str = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case : List[Any] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case : int = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Optional[Any] = DPRContextEncoderTokenizer
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : int = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Dict = DPRQuestionEncoderTokenizer
_snake_case : List[str] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case : List[Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case : List[Any] = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_lowerCAmelCase )
class a :
"""simple docstring"""
def __call__( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[bool] = None , **lowerCamelCase : List[str] , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
elif titles is None or texts is None:
__snake_case : Any = titles if texts is None else texts
return super().__call__(
lowerCamelCase , lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
__snake_case : Any = titles if not isinstance(lowerCamelCase , lowerCamelCase ) else [titles]
__snake_case : Tuple = texts if not isinstance(lowerCamelCase , lowerCamelCase ) else [texts]
__snake_case : Tuple = len(lowerCamelCase )
__snake_case : Optional[int] = questions if not isinstance(lowerCamelCase , lowerCamelCase ) else [questions] * n_passages
        assert len(lowerCamelCase ) == len(
            lowerCamelCase ), F'There should be as many titles as texts, but got {len(lowerCamelCase )} titles and {len(lowerCamelCase )} texts.'
__snake_case : Dict = super().__call__(lowerCamelCase , lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase )["input_ids"]
__snake_case : List[str] = super().__call__(lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase )["input_ids"]
__snake_case : Dict = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase , lowerCamelCase )
]
}
if return_attention_mask is not False:
__snake_case : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__snake_case : Any = attention_mask
return self.pad(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : BatchEncoding , lowerCamelCase : DPRReaderOutput , lowerCamelCase : int = 16 , lowerCamelCase : int = 64 , lowerCamelCase : int = 4 , ) -> List[DPRSpanPrediction]:
__snake_case : Any = reader_input["input_ids"]
__snake_case , __snake_case , __snake_case : Optional[int] = reader_output[:3]
__snake_case : Union[str, Any] = len(lowerCamelCase )
__snake_case : Any = sorted(range(lowerCamelCase ) , reverse=lowerCamelCase , key=relevance_logits.__getitem__ )
__snake_case : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__snake_case : List[str] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__snake_case : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__snake_case : Optional[int] = sequence_ids.index(self.pad_token_id )
else:
__snake_case : Tuple = len(lowerCamelCase )
__snake_case : str = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase , top_spans=lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase , start_index=lowerCamelCase , end_index=lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : List[int] , lowerCamelCase : int , lowerCamelCase : int , ) -> List[DPRSpanPrediction]:
__snake_case : Any = []
for start_index, start_score in enumerate(lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        __snake_case : str = sorted(lowerCamelCase , key=lambda x : x[1] , reverse=lowerCamelCase )
__snake_case : int = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
__snake_case : str = end_index - start_index + 1
assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
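            # skip spans that overlap with an already selected span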
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCAmelCase )
class a (_lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCAmelCase : str = DPRReaderTokenizer
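# --- illustrative usage sketch (not part of the original module) ---
# A minimal round trip through the reader tokenizer, assuming the
# "facebook/dpr-reader-single-nq-base" checkpoint and the `DPRReader` model
# class are available:
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by the artist Haddaway",
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   # rank passages by relevance and decode the best answer spans
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#   print(predicted_spans[0].text)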
| 134 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
_snake_case : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=8 ):
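    # round each dimension up: the result is ceil(dim / scale_factor**2) * scale_factor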
__snake_case : List[Any] = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__snake_case : Optional[int] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : MultilingualCLIP , lowerCamelCase : XLMRobertaTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, DDPMScheduler] , lowerCamelCase : VQModel , ) -> Optional[int]:
super().__init__()
self.register_modules(
text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , movq=lowerCamelCase , )
__snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __snake_case ( self : Any , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : int ) -> Any:
if latents is None:
__snake_case : str = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__snake_case : Optional[int] = latents.to(lowerCamelCase )
__snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
def __snake_case ( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str=None , ) -> List[str]:
__snake_case : Tuple = len(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else 1
# get prompt text embeddings
__snake_case : Optional[int] = self.tokenizer(
lowerCamelCase , padding="max_length" , truncation=lowerCamelCase , max_length=77 , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="pt" , )
__snake_case : List[str] = text_inputs.input_ids
__snake_case : List[Any] = self.tokenizer(lowerCamelCase , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__snake_case : Any = text_input_ids.to(lowerCamelCase )
__snake_case : List[str] = text_inputs.attention_mask.to(lowerCamelCase )
__snake_case , __snake_case : List[str] = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
__snake_case : List[Any] = prompt_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : List[str] = text_encoder_hidden_states.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : Optional[int] = text_mask.repeat_interleave(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Any = [""] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='
F' {type(lowerCamelCase )}.' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
__snake_case : int = negative_prompt
__snake_case : Dict = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=77 , truncation=lowerCamelCase , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="pt" , )
__snake_case : Dict = uncond_input.input_ids.to(lowerCamelCase )
__snake_case : List[Any] = uncond_input.attention_mask.to(lowerCamelCase )
__snake_case , __snake_case : Tuple = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Dict = negative_prompt_embeds.shape[1]
__snake_case : int = negative_prompt_embeds.repeat(1 , lowerCamelCase )
__snake_case : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase )
__snake_case : Union[str, Any] = uncond_text_encoder_hidden_states.shape[1]
__snake_case : Tuple = uncond_text_encoder_hidden_states.repeat(1 , lowerCamelCase , 1 )
__snake_case : str = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowerCamelCase , -1 )
__snake_case : Optional[int] = uncond_text_mask.repeat_interleave(lowerCamelCase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
__snake_case : List[Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__snake_case : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __snake_case ( self : List[str] , lowerCamelCase : Dict=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__snake_case : Optional[int] = torch.device(F'cuda:{gpu_id}' )
__snake_case : Optional[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[Any] , lowerCamelCase : int=0 ) -> Optional[int]:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__snake_case : Optional[Any] = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__snake_case : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__snake_case , __snake_case : List[Any] = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase )
if self.safety_checker is not None:
__snake_case , __snake_case : Optional[int] = cpu_offload_with_hook(self.safety_checker , lowerCamelCase , prev_module_hook=lowerCamelCase )
# We'll offload the last model manually.
__snake_case : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : List[Any] ) -> Optional[int]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
def __call__( self : Dict , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 100 , lowerCamelCase : float = 4.0 , lowerCamelCase : int = 1 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> List[Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[int] = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = len(lowerCamelCase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}' )
__snake_case : Any = self._execution_device
__snake_case : Any = batch_size * num_images_per_prompt
__snake_case : Any = guidance_scale > 1.0
__snake_case , __snake_case , __snake_case : Optional[Any] = self._encode_prompt(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : str = torch.cat(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
__snake_case : Dict = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
__snake_case : Tuple = self.scheduler.timesteps
__snake_case : Union[str, Any] = self.unet.config.in_channels
__snake_case , __snake_case : Tuple = get_new_h_w(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
__snake_case : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : int = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
__snake_case : Optional[Any] = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
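                # split predicted noise and variance, apply guidance to the noise, then re-attach the text-conditioned variance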
__snake_case , __snake_case : Any = noise_pred.split(latents.shape[1] , dim=1 )
__snake_case , __snake_case : Union[str, Any] = noise_pred.chunk(2 )
__snake_case , __snake_case : str = variance_pred.chunk(2 )
__snake_case : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__snake_case : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__snake_case , __snake_case : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__snake_case : str = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , ).prev_sample
# post-processing
__snake_case : str = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__snake_case : Union[str, Any] = image * 0.5 + 0.5
__snake_case : Union[str, Any] = image.clamp(0 , 1 )
__snake_case : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : str = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 134 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A: List[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: str = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'gpt_neox_japanese'
def __init__( self , lowercase=32000 , lowercase=2560 , lowercase=32 , lowercase=32 , lowercase=4 , lowercase="gelu" , lowercase=1.00 , lowercase=10000 , lowercase=2048 , lowercase=0.02 , lowercase=1e-5 , lowercase=True , lowercase=31996 , lowercase=31999 , lowercase=0.1 , lowercase=0.0 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_multiple_size
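        # multiplier used to derive the feed-forward intermediate size from hidden_size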
A__ = hidden_act
A__ = rotary_pct
A__ = rotary_emb_base
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_cache
A__ = attention_dropout
A__ = hidden_dropout
| 68 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=7 ,__UpperCAmelCase=3 ,__UpperCAmelCase=18 ,__UpperCAmelCase=30 ,__UpperCAmelCase=400 ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,) -> List[Any]:
lowerCAmelCase__ : Optional[int] = size if size is not None else {'height': 18, 'width': 18}
lowerCAmelCase__ : str = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : Optional[int] = num_channels
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : int = min_resolution
lowerCAmelCase__ : Optional[int] = max_resolution
lowerCAmelCase__ : Union[str, Any] = do_resize
lowerCAmelCase__ : Dict = size
lowerCAmelCase__ : int = apply_ocr
def UpperCAmelCase_ ( self ) -> str:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = LayoutLMvaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""size""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""apply_ocr""" ) )
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
lowerCAmelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
self.assertIsInstance(encoding.words ,__UpperCAmelCase )
self.assertIsInstance(encoding.boxes ,__UpperCAmelCase )
# Test batched
lowerCAmelCase__ : List[Any] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
lowerCAmelCase__ : List[Any] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
lowerCAmelCase__ : Dict = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def UpperCAmelCase_ ( self ) -> Optional[int]:
# with apply_OCR = True
lowerCAmelCase__ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase__ : Tuple = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )
lowerCAmelCase__ : Tuple = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
lowerCAmelCase__ : Dict = image_processing(__UpperCAmelCase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase__ : Tuple = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCAmelCase__ : List[str] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,__UpperCAmelCase )
self.assertListEqual(encoding.boxes ,__UpperCAmelCase )
# with apply_OCR = False
lowerCAmelCase__ : Tuple = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase )
lowerCAmelCase__ : Dict = image_processing(__UpperCAmelCase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 362 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 184 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
    def test_small_integration_test(self):
        '''simple docstring'''
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
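        # Note: `loss` above is the mean per-token cross-entropy, so scaling it by
        # the label length and negating recovers the sequence log-likelihood that
        # the reference Mesh-TensorFlow implementation reports (hence `mtf_score`).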
| 348 | def logical_left_shift(number: int , shift_amount: int )-> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int , shift_amount: int )-> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int , shift_amount: int )-> str:
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
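# Worked examples (values checked by hand against the definitions above):
#   logical_left_shift(0b1100, 2)   -> '0b110000'
#   logical_right_shift(0b1100, 2)  -> '0b11'
#   arithmetic_right_shift(-8, 2)   -> '0b1110'  (the sign bit is replicated)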
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self,parent,batch_size=13,seq_length=7,is_training=True,use_input_mask=True,use_token_type_ids=True,use_labels=True,vocab_size=99,hidden_size=64,embedding_size=32,num_hidden_layers=5,num_attention_heads=4,intermediate_size=37,hidden_act="gelu",hidden_dropout_prob=0.1,attention_probs_dropout_prob=0.1,max_position_embeddings=512,type_vocab_size=16,type_sequence_label_size=2,initializer_range=0.02,num_labels=3,num_choices=4,scope=None,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size],self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
            choice_labels = ids_tensor([self.batch_size],self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,embedding_size=self.embedding_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=False,initializer_range=self.initializer_range,)
    def create_and_check_megatron_bert_model(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids,attention_mask=input_mask,token_type_ids=token_type_ids )
        result = model(input_ids,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )
    def create_and_check_megatron_bert_for_masked_lm(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,labels=token_labels )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_causal_lm(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,labels=token_labels )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_next_sequence_prediction(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,labels=sequence_labels,)
        self.parent.assertEqual(result.logits.shape,(self.batch_size, 2) )
    def create_and_check_megatron_bert_for_pretraining(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,labels=token_labels,next_sentence_label=sequence_labels,)
        self.parent.assertEqual(result.prediction_logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape,(self.batch_size, 2) )
    def create_and_check_megatron_bert_for_question_answering(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,start_positions=sequence_labels,end_positions=sequence_labels,)
        self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
    def create_and_check_megatron_bert_for_sequence_classification(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
    def create_and_check_megatron_bert_for_token_classification(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids,attention_mask=input_mask,token_type_ids=token_type_ids,labels=token_labels )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_megatron_bert_for_multiple_choice(self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids,attention_mask=multiple_choice_input_mask,token_type_ids=multiple_choice_token_type_ids,labels=choice_labels,)
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping =(
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible =True
# test_resize_embeddings = False
    test_head_masking =False
    def _prepare_for_class(self,inputs_dict,model_class,return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict,model_class,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),dtype=torch.long,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size,dtype=torch.long,device=torch_device )
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self,config_class=MegatronBertConfig,hidden_size=37 )
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase ):
    @slow
    @unittest.skip("""Model is not available.""" )
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = """nvidia/megatron-bert-uncased-345m"""
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["""MYDIR"""],directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 10_24) )
        self.assertEqual(output.shape,expected_shape )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = """ii={} jj={} a={} b={}""".format(ii,jj,a,b )
                self.assertTrue(math.isclose(a,b,rel_tol=TOLERANCE,abs_tol=TOLERANCE ),msg=msg )
| 46 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph ) -> bool:
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
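# The 4-cycle above (0-1-2-3) plus the isolated vertex 4 is bipartite, so this
# prints True. A graph with an odd cycle is not, e.g.:
#   check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) -> False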
| 46 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 20_00 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
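# Minimal usage sketch (the checkpoint id is an assumption; any repository that
# pairs a UNet2DModel with a ScoreSdeVeScheduler should work):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]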
| 342 |
def longest_distance(graph ):
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
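# For the adjacency list above the longest path visits 0 -> 2 -> 5 -> 6 -> 7
# (5 vertices), so this prints 5.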
| 342 | 1 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float , from_type: str , to_type: str) -> float:
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid 'from_type' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid 'to_type' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent)
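# Worked example: length_conversion(4, "meter", "kilometer") -> 0.004
# (the exponents are 0 and 3, so the value is scaled by 10 ** -3).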
if __name__ == "__main__":
from doctest import testmod
testmod()
| 361 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 327 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[int] , iterations: int , ) -> list[float]:
    """simple docstring"""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64] ) -> bool:
    """simple docstring"""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
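# Worked example (a system constructed so that x=1, y=-2, z=1 is exact; the
# coefficient matrix is strictly diagonally dominant, so the iteration converges):
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[3.0], [-7.0], [1.0]])
#   jacobi_iteration_method(coefficient, constant, [0, 0, 0], iterations=50)
#   -> approximately [1.0, -2.0, 1.0]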
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 224 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        tokenizer.model_max_length = 77
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_attention_slicing_forward_pass(self ):
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical(self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_alt_diffusion_ddim(self ):
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["""text_encoder"""] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""prompt"""] = """A photo of an astronaut"""
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_pndm(self ):
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["""text_encoder"""] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self ):
        """simple docstring"""
        alt_pipe = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self ):
        """simple docstring"""
        scheduler = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
        alt_pipe = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""numpy""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 166 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self ):
        """simple docstring"""
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , value ):
        """simple docstring"""
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 166 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,embedding_size=16 ,hidden_size=36 ,num_hidden_layers=6 ,num_hidden_groups=6 ,num_attention_heads=6 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.0_2 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        '''simple docstring'''
        return AlbertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,num_hidden_groups=self.num_hidden_groups ,)
    def create_and_check_model(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,sentence_order_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape ,(self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self ,inputs_dict ,model_class ,return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=AlbertConfig ,hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        '''simple docstring'''
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1E-4 ) )
| 83 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset ):
    def __init__( self ,dataset ,process ,params ):
        '''simple docstring'''
        self.dataset = dataset
        self.process = process
        self.params = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
    def __getitem__( self ,i ):
        '''simple docstring'''
        item = self.dataset[i]
        processed = self.process(item ,**self.params )
        return processed
class lowercase__ ( lowercase ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int]=None ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = loader
_UpperCamelCase : Tuple = infer
_UpperCamelCase : List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCamelCase : Any = None
_UpperCamelCase : Union[str, Any] = loader_batch_size
# Internal bookkeeping
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : str = None
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : int ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = iter(self.loader )
return self
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
if isinstance(self._loader_batch_data ,torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCamelCase : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCamelCase : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Convert ModelOutput to tuple first
_UpperCamelCase : str = element.to_tuple()
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCamelCase : Optional[int] = None
elif isinstance(element[self._loader_batch_index] ,torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] ,np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : Optional[Any] = np.expand_dims(element[self._loader_batch_index] ,0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCamelCase : Union[str, Any] = element[self._loader_batch_index]
        # Recreate the element by reusing the original class to make it look
        # like batch_size=1
_UpperCamelCase : Optional[int] = self._loader_batch_data.__class__(lowerCamelCase__ )
self._loader_batch_index += 1
return result
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCamelCase : Tuple = next(self.iterator )
_UpperCamelCase : List[str] = self.infer(lowerCamelCase__ ,**self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : List[Any] = processed
else:
_UpperCamelCase : List[Any] = list(processed.keys() )[0]
_UpperCamelCase : Optional[int] = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = len(lowerCamelCase__ )
else:
_UpperCamelCase : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : int = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCamelCase : Dict = processed
_UpperCamelCase : str = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( lowercase ):
def __init__( self : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any=None ):
'''simple docstring'''
super().__init__(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def __iter__( self : Dict ):
'''simple docstring'''
_UpperCamelCase : str = iter(self.loader )
_UpperCamelCase : List[str] = None
return self
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
_UpperCamelCase : Tuple = self.infer(next(self.iterator ) ,**self.params )
try:
# Try to return next item
_UpperCamelCase : Optional[Any] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been fully iterated through.
            #
            # Another way to look at it is that we're basically flattening lists of
            # lists into a single list, but with generators.
_UpperCamelCase : List[Any] = self.infer(next(self.iterator ) ,**self.params )
_UpperCamelCase : int = next(self.subiterator )
return processed
class lowercase__ ( lowercase ):
def __iter__( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Dict = iter(self.loader )
return self
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        # Extremely similar to PipelineIterator in its unpacking mechanism,
        # BUT we have an extra required item, which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`, so
        # we need to keep track of how to regroup items here into the original
        # `process` boundaries so that `process` and `postprocess` see the same
        # data. This iterator accumulates items (possibly while unbatching) until
        # it hits an `is_last` and then just passes them on to the caller.
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase : Dict = self.loader_batch_item()
_UpperCamelCase : List[str] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
while not is_last:
_UpperCamelCase : List[Any] = self.infer(next(self.iterator ) ,**self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : str = processed
else:
_UpperCamelCase : Any = list(processed.keys() )[0]
_UpperCamelCase : Tuple = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Dict = len(lowerCamelCase__ )
else:
_UpperCamelCase : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : Any = observed_batch_size
_UpperCamelCase : List[Any] = processed
_UpperCamelCase : int = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase : List[Any] = self.loader_batch_item()
_UpperCamelCase : Optional[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
else:
_UpperCamelCase : Any = processed
_UpperCamelCase : List[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
return accumulator
class lowercase__ ( lowercase ):
def __init__( self : Tuple ,lowerCamelCase__ : Dataset ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : int = dataset
_UpperCamelCase : str = key
def __len__( self : Dict ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Tuple ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( lowercase ):
def __init__( self : List[Any] ,lowerCamelCase__ : Dataset ,lowerCamelCase__ : str ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : int = dataset
_UpperCamelCase : Optional[Any] = keya
_UpperCamelCase : str = keya
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 83 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Dict = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """xlm"""
_lowerCAmelCase = {
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
def __init__( self , __magic_name__=3_01_45 , __magic_name__=20_48 , __magic_name__=12 , __magic_name__=16 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=True , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=1 , __magic_name__=True , __magic_name__=5_12 , __magic_name__=20_48**-0.5 , __magic_name__=1e-12 , __magic_name__=0.0_2 , __magic_name__=0 , __magic_name__=1 , __magic_name__=2 , __magic_name__=3 , __magic_name__=5 , __magic_name__=True , __magic_name__="first" , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=0.1 , __magic_name__=5 , __magic_name__=5 , __magic_name__=0 , __magic_name__=0 , __magic_name__=2 , __magic_name__=0 , **__magic_name__ , ) -> str:
_a = vocab_size
_a = emb_dim
_a = n_layers
_a = n_heads
_a = dropout
_a = attention_dropout
_a = gelu_activation
_a = sinusoidal_embeddings
_a = causal
_a = asm
_a = n_langs
_a = use_lang_emb
_a = layer_norm_eps
_a = bos_index
_a = eos_index
_a = pad_index
_a = unk_index
_a = mask_index
_a = is_encoder
_a = max_position_embeddings
_a = embed_init_std
_a = init_std
_a = summary_type
_a = summary_use_proj
_a = summary_activation
_a = summary_proj_to_labels
_a = summary_first_dropout
_a = start_n_top
_a = end_n_top
_a = mask_token_id
_a = lang_id
if "n_words" in kwargs:
_a = kwargs['n_words']
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , **__magic_name__ )
class a ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
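# Note (added): the OnnxConfig subclass above declares the dynamic axes used
# when exporting XLM to ONNX: the batch and sequence dimensions (plus a choice
# dimension for multiple-choice tasks) stay symbolic, so a single exported
# graph accepts inputs of any batch size and sequence length.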
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a_ : Optional[Any] = logging.get_logger(__name__)
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , *__magic_name__ , **__magic_name__ ) -> None:
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
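# Note (added, hedged): this shim only emits a deprecation warning (a
# FutureWarning in the upstream source, obfuscated to `__magic_name__` here)
# and otherwise delegates everything to CLIPImageProcessor, so existing
# `CLIPFeatureExtractor` imports keep working until version 5.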
| 104 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
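# Note (added): `_LazyModule` defers the submodule imports declared in
# `_import_structure` until an attribute is first accessed, keeping the package
# import cheap even when torch and sentencepiece are installed; the
# `TYPE_CHECKING` branch above runs only under static type checkers.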
| 50 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
lowerCamelCase__ : str = set()
lowerCamelCase__ : Any = []
def parse_line(_UpperCAmelCase ):
for line in fp:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Any = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_UpperCAmelCase ) > 0:
lowerCamelCase__ : str = '\n'.join(_UpperCAmelCase )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(_UpperCAmelCase )
buffer.clear()
continue
else:
lowerCamelCase__ : List[str] = line.strip()
buffer.append(_UpperCAmelCase )
if from_gh:
for filename in os.listdir(_UpperCAmelCase ):
lowerCamelCase__ : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
else:
try:
with zipfile.ZipFile(_UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = set()
lowerCamelCase__ : Optional[int] = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_UpperCAmelCase , _UpperCAmelCase ) )
return selected_warnings
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
return values.split(',' )
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
_UpperCAmelCase : Dict = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_UpperCAmelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_UpperCAmelCase : Dict = extract_warnings(args.output_dir, args.targets)
_UpperCAmelCase : Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
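    # Hedged usage sketch (added): with the flags defined above, an invocation
    # might look like
    #   python extract_warnings.py --workflow_run_id 123456789 \
    #       --output_dir warnings_out --token $GITHUB_TOKEN
    # where the script name, run id, directory and token are illustrative
    # placeholders.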
| 50 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=1000 , ) -> Optional[Any]:
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = patch_size
_a = text_seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = coordinate_size
_a = shape_size
_a = num_labels
_a = num_choices
_a = scope
_a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_a = text_seq_length
_a = (image_size // patch_size) ** 2 + 1
_a = self.text_seq_length + self.image_seq_length
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a = bbox[i, j, 3]
_a = bbox[i, j, 1]
_a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_a = bbox[i, j, 2]
_a = bbox[i, j, 0]
_a = t
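                # (added note) the two swaps above enforce x0 <= x1 and y0 <= y1,
                # so every randomly drawn box is a valid (left, top, right,
                # bottom) quadruple.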
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.text_seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = LayoutLMvaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# text + image
_a = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase )
_a = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
_a = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
_a = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_a = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_a = model(pixel_values=__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
_a = self.num_labels
_a = LayoutLMvaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = self.num_labels
_a = LayoutLMvaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = LayoutLMvaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.prepare_config_and_inputs()
        (
            _a,
            _a,
            _a,
            _a,
            _a,
            _a,
            _a,
            _a,
        ) = config_and_inputs
_a = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = False
A_ : int = False
A_ : List[Any] = False
A_ : Dict = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = LayoutLMvaModelTester(self )
_a = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Tuple:
_a = copy.deepcopy(__UpperCAmelCase )
if model_class in get_values(__UpperCAmelCase ):
_a = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__UpperCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
_a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
elif model_class in get_values(__UpperCAmelCase ):
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
elif model_class in [
*get_values(__UpperCAmelCase ),
]:
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
elif model_class in [
*get_values(__UpperCAmelCase ),
]:
_a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__UpperCAmelCase , )
return inputs_dict
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = LayoutLMvaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(__UpperCAmelCase )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).pixel_values.to(__UpperCAmelCase )
_a = torch.tensor([[1, 2]] )
_a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_a = model(
input_ids=input_ids.to(__UpperCAmelCase ) , bbox=bbox.to(__UpperCAmelCase ) , pixel_values=pixel_values.to(__UpperCAmelCase ) , )
# verify the logits
_a = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase )
_a = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 153 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = RobertaTokenizer
A_ : Any = RobertaTokenizerFast
A_ : Dict = True
A_ : Tuple = {'cls_token': '<s>'}
def _UpperCAmelCase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained('''roberta-base''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
_a = '''Encode this sequence.'''
_a = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing spaces after special tokens
_a = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} ) # mask token has a left space
_a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_a = '''Encode <mask> sequence'''
_a = '''Encode <mask>sequence'''
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
_a = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _UpperCAmelCase ( self ) -> Any:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a = F'{text_of_1_token} {text_of_1_token}'
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
| 153 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowerCamelCase__ ) , '''Tatoeba directory does not exist.''' )
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : List[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ , A_ : str = self.resolver.write_model_card("opus-mt-he-en" , dry_run=snake_case )
assert mmeta["long_pair"] == "heb-eng"
| 300 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Optional[int] = '''
Human: <<task>>
Assistant: '''
_lowerCAmelCase : int = '''huggingface-tools/default-prompts'''
_lowerCAmelCase : Any = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="run" ) -> List[Any]:
if prompt_or_repo_id is None:
A_ : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
A_ : Optional[Any] = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
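# Hedged usage note (added): because of the whitespace check above, a literal
# prompt is returned unchanged while a bare repo id is resolved on the Hub:
#   __snake_case("Translate <<task>> into French", "my-agent")  # returned as-is
#   __snake_case("huggingface-tools/default-prompts", "my-agent", "chat")  # fetched
# (in upstream transformers this helper is named `download_prompt`).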
| 300 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int = 10 , lowerCAmelCase : int = 22 ):
"""simple docstring"""
__magic_name__ : Any = range(1 , lowerCAmelCase )
__magic_name__ : List[Any] = range(1 , lowerCAmelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
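# Worked example (added): 7**5 = 16807 has exactly 5 digits, so the pair
# (base=7, power=5) is counted. Bases >= 10 can never qualify, since 10**n
# already has n + 1 digits, which is why the default base range stops at 9.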
if __name__ == "__main__":
    print(F'{solution(1_0, 2_2) = }')
| 275 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Dict ) -> List[str]:
torch.manual_seed(0 )
__magic_name__ : Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __lowerCAmelCase ( self : str ) -> Any:
__magic_name__ : Union[str, Any] = self.dummy_uncond_unet
__magic_name__ : str = KarrasVeScheduler()
__magic_name__ : List[Any] = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Dict = torch.manual_seed(0 )
__magic_name__ : int = pipe(num_inference_steps=2 , generator=_A , output_type='numpy' ).images
__magic_name__ : Any = torch.manual_seed(0 )
__magic_name__ : str = pipe(num_inference_steps=2 , generator=_A , output_type='numpy' , return_dict=_A )[0]
__magic_name__ : int = image[0, -3:, -3:, -1]
__magic_name__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) -> str:
__magic_name__ : Optional[int] = 'google/ncsnpp-celebahq-256'
__magic_name__ : List[str] = UNetaDModel.from_pretrained(_A )
__magic_name__ : int = KarrasVeScheduler()
__magic_name__ : str = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Any = torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = pipe(num_inference_steps=20 , generator=_A , output_type='numpy' ).images
__magic_name__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__magic_name__ : int = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 275 | 1 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[list, list, list, list]:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Tuple = len(lowerCamelCase__ )
__lowerCamelCase : List[Any] = matrix_length // 2
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : str = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Optional[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[int, int]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase : str = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : List[str] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ )[1] != matrix_dimensions(lowerCamelCase__ )[0]:
__lowerCamelCase : Any = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(lowerCamelCase__ )
__lowerCamelCase : str = matrix_dimensions(lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_dimensions(lowerCamelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : str = max(*lowerCamelCase__ , *lowerCamelCase__ )
__lowerCamelCase : List[str] = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase__ ) ) ) )
__lowerCamelCase : Any = matrixa
__lowerCamelCase : int = matrixa
    # Adding zeros to the matrices so that the arrays' dimensions are the same
    # and also a power of 2
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , lowerCamelCase__ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a =[
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a =[[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
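# Note (added): Strassen's recursion replaces the 8 half-size multiplications
# of the naive divide-and-conquer scheme with 7, at the cost of extra additions
# and subtractions, giving O(n**log2(7)) ~= O(n**2.807) arithmetic operations
# instead of O(n**3); the zero-padding above lifts both operands to the nearest
# power-of-two square so the recursion always splits evenly.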
| 73 |
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
for i in range(0 , lowerCamelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
for i in range(lowerCamelCase__ , 0 , -1 ):
for _ in range(lowerCamelCase__ , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCamelCase__ ) # upper half
reverse_floyd(lowerCamelCase__ ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
a =1
while K:
a =int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
a =int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 73 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ = 16
a__ = 32
def UpperCamelCase__( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 )->str:
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')
    def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
# For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
# Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
# Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
        for epoch in range(num_epochs):
model.train()
            for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
            for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 356 |
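The key pattern in this row is the `find_executable_batch_size` decorator; a minimal standalone sketch (names illustrative, not from the script) of how it injects the batch size and retries with a halved value whenever the body raises a CUDA out-of-memory error:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f'trying batch_size={batch_size}')
    # build the dataloaders and run the training loop with `batch_size` here

train()  # called with no arguments; the decorator injects `batch_size`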
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 0 |
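A short usage sketch for convert_speed above; each result follows from routing through km/h and rounding to three decimals:

print(convert_speed(100, 'km/h', 'm/s'))  # 27.778
print(convert_speed(100, 'mph', 'km/h'))  # 160.934
print(convert_speed(10, 'knot', 'mph'))   # 11.508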
'''simple docstring'''
def solution(length: int = 50) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 152 |
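A small-case sanity check of the recurrence (it counts tilings of a 1 x n row with unit squares plus tiles of length 2-4, in the style of Project Euler 117):

print([solution(n) for n in range(1, 5)])  # [1, 2, 4, 8]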
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location="""cpu""")
return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
'''simple docstring'''
    new_d = OrderedDict()
_UpperCAmelCase : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; it was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
'''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_UpperCAmelCase : Optional[int] = """pretraining"""
if "vcr" in checkpoint_path:
_UpperCAmelCase : Optional[int] = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_UpperCAmelCase : List[Any] = {"""visual_embedding_dim""": 2048}
elif "vqa" in checkpoint_path:
_UpperCAmelCase : Any = {"""visual_embedding_dim""": 2048}
elif "nlvr" in checkpoint_path:
_UpperCAmelCase : Any = {"""visual_embedding_dim""": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_UpperCAmelCase : str = {"""visual_embedding_dim""": 512}
_UpperCAmelCase : int = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_UpperCAmelCase : str = {"""visual_embedding_dim""": 2048}
_UpperCAmelCase : int = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_UpperCAmelCase : List[str] = {"""visual_embedding_dim""": 2048, """num_labels""": 3129}
_UpperCAmelCase : int = """vqa"""
elif "nlvr" in checkpoint_path:
_UpperCAmelCase : int = {
"""visual_embedding_dim""": 1024,
"""num_labels""": 2,
}
_UpperCAmelCase : Optional[Any] = """nlvr"""
_UpperCAmelCase : int = VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
_UpperCAmelCase : Any = load_state_dict(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = get_new_dict(lowerCAmelCase_ , lowerCAmelCase_ )
if model_type == "pretraining":
_UpperCAmelCase : List[str] = VisualBertForPreTraining(lowerCAmelCase_ )
elif model_type == "vqa":
_UpperCAmelCase : Optional[int] = VisualBertForQuestionAnswering(lowerCAmelCase_ )
elif model_type == "nlvr":
_UpperCAmelCase : str = VisualBertForVisualReasoning(lowerCAmelCase_ )
elif model_type == "multichoice":
_UpperCAmelCase : Dict = VisualBertForMultipleChoice(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
A_ : int = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 215 | 0 |
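A standalone sketch of the prefix-renaming pass inside get_new_dict above (toy keys, not real VisualBERT weights): each (old, new) pair is applied to every state-dict key with str.replace.

from collections import OrderedDict

pairs = [('bert.bert', 'visual_bert'), ('bert.cls', 'cls')]
old_sd = OrderedDict([('bert.bert.encoder.w', 1), ('bert.cls.predictions.bias', 2)])

new_sd = OrderedDict()
for key, value in old_sd.items():
    new_key = key
    for old, new in pairs:
        new_key = new_key.replace(old, new)
    new_sd[new_key] = value

print(list(new_sd))  # ['visual_bert.encoder.w', 'cls.predictions.bias']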
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), F'''number_of_steps needs to be a positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
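A quick check of the recurrence above: f(n) = f(n - 1) + f(n - 2) with f(1) = 1 and f(2) = 2, so the first few values are 1, 2, 3, 5, 8.

print([climb_stairs(n) for n in range(1, 6)])  # [1, 2, 3, 5, 8]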
| 54 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''')
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''')
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = F'''\n{hint}''' if hint is not None else ''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', requirement)
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
                F''' got {requirement}''')
        pkg, want_full = match[0]
        want_range = want_full.split(''',''')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''', w)
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
                    F''' but got {requirement}''')
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys())}, but got {op}''')
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''')
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement, hint)
| 54 | 1 |
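A hedged usage sketch for require_version above; the requirement strings here are illustrative, mirroring how transformers guards its dependencies:

require_version('packaging>=20.0', 'pip install packaging')  # passes whenever packaging is installed
require_version('python>=3.7')  # 'python' is special-cased against sys.version_info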
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case_ = logging.get_logger(__name__)
def shape_list(tensor):
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=1e-5 , lowercase_=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase_ , lowercase_ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
UpperCAmelCase , UpperCAmelCase = tf.nn.moments(lowercase_ , axes=[axis] , keepdims=lowercase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
UpperCAmelCase = [1] * inputs.shape.rank
UpperCAmelCase = shape_list(lowercase_ )[axis]
UpperCAmelCase = tf.reshape(lowercase_ , lowercase_ )
UpperCAmelCase = tf.reshape(lowercase_ , lowercase_ )
# Compute layer normalization using the batch_normalization
# function.
UpperCAmelCase = tf.nn.batch_normalization(
lowercase_ , lowercase_ , lowercase_ , offset=lowercase_ , scale=lowercase_ , variance_epsilon=lowercase_ , )
return outputs
def _lowerCAmelCase ( lowercase_ , lowercase_=0 , lowercase_=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
UpperCAmelCase = tf.shape(lowercase_ )
UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowercase_ , lowercase_ )
def _lowerCAmelCase ( lowercase_ ):
if not isinstance(lowercase_ , tf.Tensor ):
UpperCAmelCase = tf.convert_to_tensor(lowercase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
UpperCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
UpperCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
UpperCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ = "input_ids" ):
tf.debugging.assert_less(
lowercase_ , tf.cast(lowercase_ , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase_ )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
    HDF5_OBJECT_HEADER_LIMIT = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase = [x for x in data if len(lowercase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
UpperCAmelCase = np.asarray(lowercase_ )
UpperCAmelCase = 1
UpperCAmelCase = np.array_split(lowercase_ , lowercase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase = np.array_split(lowercase_ , lowercase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowercase_ ):
UpperCAmelCase = chunk_data
else:
UpperCAmelCase = data
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if name in group.attrs:
UpperCAmelCase = [n.decode('utf8' ) if hasattr(lowercase_ , 'decode' ) else n for n in group.attrs[name]]
else:
UpperCAmelCase = []
UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(lowercase_ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase ( lowercase_ ):
def _expand_single_ad_tensor(lowercase_ ):
if isinstance(lowercase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowercase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowercase_ )
| 78 |
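A small, self-contained check of the behavior these helpers implement, written with plain TF ops since most of this row's names are mangled:

import tensorflow as tf

t = tf.zeros((2, 3, 4))
print(t.shape.as_list())  # [2, 3, 4] -- what shape_list returns for a fully static shape

# flattening dims 1..2 as in the flatten helper: (2, 3, 4) -> (2, 12)
print(tf.reshape(t, [tf.shape(t)[0], -1]).shape)

# stable_softmax is just softmax of logits shifted by a negligible 1e-9
logits = tf.constant([[1.0, 2.0, 3.0]])
print(tf.nn.softmax(logits + 1e-9, axis=-1))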
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
# get the original timestep using init_timestep
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
UpperCAmelCase = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase = image
else:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
else:
UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
UpperCAmelCase = self.movq.config.scaling_factor * init_latents
UpperCAmelCase = torch.cat([init_latents] , dim=0 )
UpperCAmelCase = init_latents.shape
UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = init_latents
return latents
def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
UpperCAmelCase = self._execution_device
UpperCAmelCase = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase = image_embeds.shape[0]
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [image]
if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
UpperCAmelCase = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {'image_embeds': image_embeds}
UpperCAmelCase = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
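A worked check of the strength/timestep arithmetic in get_timesteps above: with 100 inference steps and strength 0.2, only the final 20 noise levels are denoised.

num_inference_steps, strength = 100, 0.2
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 20
t_start = max(num_inference_steps - init_timestep, 0)                          # 80
print(t_start, num_inference_steps - t_start)  # 80 20 -> scheduler.timesteps[80:] are run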
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=1_3 , UpperCAmelCase_ : Union[str, Any]=3_0 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=3_2 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : int=3_7 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=2 , ):
"""simple docstring"""
a : Optional[int] = parent
a : Any = batch_size
a : str = image_size
a : List[Any] = patch_size
a : Dict = num_channels
a : List[str] = is_training
a : Any = use_labels
a : Union[str, Any] = hidden_size
a : List[str] = num_hidden_layers
a : List[str] = num_attention_heads
a : Optional[Any] = intermediate_size
a : str = hidden_act
a : str = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Optional[Any] = type_sequence_label_size
a : Optional[Any] = initializer_range
a : List[Any] = scope
a : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
a : str = (image_size // patch_size) ** 2
a : Dict = num_patches + 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[Any] = DeiTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Tuple = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : int = DeiTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : List[str] = 1
a : Tuple = DeiTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : List[str] = self.type_sequence_label_size
a : Optional[int] = DeiTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = DeiTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : int = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Tuple = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[int] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A : Union[str, Any] = False
A : List[str] = False
A : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Tuple = DeiTModelTester(self)
a : int = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Any = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(UpperCAmelCase_)
a : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Tuple = [*signature.parameters.keys()]
a : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=False):
"""simple docstring"""
a : int = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
if not self.model_tester.is_training:
return
a , a : str = self.model_tester.prepare_config_and_inputs_for_common()
a : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCAmelCase_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
a : Union[str, Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : Optional[Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
a : Optional[int] = model(**UpperCAmelCase_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a : Dict = False
a : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
a : Any = model_class(UpperCAmelCase_)
model.gradient_checkpointing_enable()
model.to(UpperCAmelCase_)
model.train()
a : Any = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
a : List[str] = model(**UpperCAmelCase_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Union[str, Any] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCAmelCase_),
*get_values(UpperCAmelCase_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}"""):
a : List[Any] = problem_type['title']
a : Any = problem_type['num_labels']
a : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if problem_type["num_labels"] > 1:
a : Optional[int] = inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels'])
a : Any = inputs['labels'].to(problem_type['dtype'])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCAmelCase_) as warning_list:
a : int = model(**UpperCAmelCase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = DeiTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[Any] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : Dict = prepare_img()
a : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : int = model(**UpperCAmelCase_)
# verify the logits
a : Optional[int] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Dict = torch.tensor([-1.02_66, 0.19_12, -1.28_61]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : Any = prepare_img()
a : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Union[str, Any] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
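A worked check of the sequence-length bookkeeping in the tester above: with the default 30x30 image and 2x2 patches, (30 // 2) ** 2 = 225 patches, plus the [CLS] and distillation tokens, gives seq_length = 227.

image_size, patch_size = 30, 2
print((image_size // patch_size) ** 2 + 2)  # 227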
| 345 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 345 | 1 |
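A short usage sketch for find_max above (bounds are inclusive and 0-indexed):

nums = [3, 1, 4, 1, 5, 9, 2, 6]
print(find_max(nums, 0, len(nums) - 1))  # 9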