| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
"""Trivial substitution cipher: letters a-z map to the numbers 1-26."""
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
(code_codestyle: 127)
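A quick round-trip check of the cipher above (a sketch, assuming the `encode`/`decode` definitions as fixed):

```python
# 'a' maps to 1, ..., 'z' to 26, and decode inverts encode.
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"
```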
"""Tests for diffusers' safetensors-compatibility check."""
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
(style_context_codestyle: 127, label: 1)
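For intuition, the rule these tests exercise can be restated in a few lines (a simplified sketch, not the library's actual implementation, and it deliberately ignores the `variant` handling):

```python
# Every pipeline component folder that ships a PyTorch .bin file should also
# ship a .safetensors file in the same folder.
def components_have_safetensors(filenames: list[str]) -> bool:
    bin_dirs = {f.split("/")[0] for f in filenames if f.endswith(".bin")}
    safetensors_dirs = {f.split("/")[0] for f in filenames if f.endswith(".safetensors")}
    return bin_dirs <= safetensors_dirs
```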
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None  # attribute name assumed; the obfuscated copy only shows a None assignment here
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
(code_codestyle: 703)
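The batch-generation test above hinges on left-padding, which decoder-only models need so that every row's last real token sits right at the generation boundary. A minimal sketch (assumes TensorFlow is installed and the facebook/xglm-564M checkpoint is reachable):

```python
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"  # pad on the left so the newest token abuts the continuation
batch = tokenizer(["Hello, my dog is a little", "Hi"], padding=True, return_tensors="tf")
print(batch["input_ids"].shape, batch["attention_mask"].shape)
```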
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-letter signature, identical for all anagrams of a word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list sharing my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
(style_context_codestyle: 678, label: 0)
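A small illustration of the signature grouping used above:

```python
# Anagrams collide on the sorted-letter signature.
assert signature("stop") == signature("pots") == "opst"
```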
"""simple docstring"""
from collections.abc import Generator
from math import sin
def snake_case__ ( _lowerCamelCase ) ->bytes:
"""simple docstring"""
if len(_lowerCamelCase ) != 32:
raise ValueError("Input must be of length 32" )
__lowercase : int = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case__ ( _lowerCamelCase ) ->bytes:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__lowercase : str = format(_lowerCamelCase, "08x" )[-8:]
__lowercase : List[Any] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def snake_case__ ( _lowerCamelCase ) ->bytes:
"""simple docstring"""
__lowercase : int = b""
for char in message:
bit_string += format(_lowerCamelCase, "08b" ).encode("utf-8" )
__lowercase : int = format(len(_lowerCamelCase ), "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_lowerCamelCase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def snake_case__ ( _lowerCamelCase ) ->Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowerCamelCase ) % 5_12 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0, len(_lowerCamelCase ), 5_12 ):
__lowercase : str = bit_string[pos : pos + 5_12]
__lowercase : List[Any] = []
for i in range(0, 5_12, 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
yield block_words
def snake_case__ ( _lowerCamelCase ) ->int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__lowercase : Dict = format(_lowerCamelCase, "032b" )
__lowercase : Optional[int] = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowerCamelCase, 2 )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
return (a + b) % 2**32
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case__ ( _lowerCamelCase ) ->bytes:
"""simple docstring"""
__lowercase : str = preprocess(_lowerCamelCase )
__lowercase : str = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__lowercase : str = 0X67_45_23_01
__lowercase : Optional[Any] = 0Xef_cd_ab_89
__lowercase : int = 0X98_ba_dc_fe
__lowercase : str = 0X10_32_54_76
__lowercase : Optional[Any] = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowerCamelCase ):
__lowercase : List[Any] = aa
__lowercase : List[str] = ba
__lowercase : Dict = ca
__lowercase : str = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__lowercase : int = d ^ (b & (c ^ d))
__lowercase : str = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__lowercase : List[Any] = c ^ (d & (b ^ c))
__lowercase : Optional[Any] = (5 * i + 1) % 16
elif i <= 47:
__lowercase : Tuple = b ^ c ^ d
__lowercase : Optional[Any] = (3 * i + 5) % 16
else:
__lowercase : Any = c ^ (b | not_aa(_lowerCamelCase ))
__lowercase : List[str] = (7 * i) % 16
__lowercase : Union[str, Any] = (f + a + added_consts[i] + block_words[g]) % 2**32
__lowercase : Optional[Any] = d
__lowercase : List[str] = c
__lowercase : Optional[int] = b
__lowercase : List[str] = sum_aa(_lowerCamelCase, left_rotate_aa(_lowerCamelCase, shift_amounts[i] ) )
# Add hashed chunk to running total
__lowercase : int = sum_aa(_lowerCamelCase, _lowerCamelCase )
__lowercase : Optional[int] = sum_aa(_lowerCamelCase, _lowerCamelCase )
__lowercase : Optional[Any] = sum_aa(_lowerCamelCase, _lowerCamelCase )
__lowercase : Optional[Any] = sum_aa(_lowerCamelCase, _lowerCamelCase )
__lowercase : List[Any] = reformat_hex(_lowerCamelCase ) + reformat_hex(_lowerCamelCase ) + reformat_hex(_lowerCamelCase ) + reformat_hex(_lowerCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
(code_codestyle: 575)
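A cross-check of the pure-Python digest against the standard library (assumes the `md5_me` definition as fixed above):

```python
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")
```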
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__A : Tuple = random.Random()
def snake_case__ ( _lowerCamelCase, _lowerCamelCase=1.0, _lowerCamelCase=None, _lowerCamelCase=None ) ->str:
"""simple docstring"""
if rng is None:
__lowercase : str = global_rng
__lowercase : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowercase__ : int , lowercase__ : Optional[Any]=7 , lowercase__ : List[Any]=4_0_0 , lowercase__ : Optional[int]=2_0_0_0 , lowercase__ : Any=1 , lowercase__ : Union[str, Any]=0.0 , lowercase__ : Tuple=1_6_0_0_0 , lowercase__ : Tuple=True , lowercase__ : Tuple=8_0 , lowercase__ : int=1_6 , lowercase__ : Optional[Any]=6_4 , lowercase__ : List[str]="hann_window" , lowercase__ : Union[str, Any]=8_0 , lowercase__ : List[str]=7_6_0_0 , lowercase__ : int=1e-10 , lowercase__ : Tuple=True , ):
__lowercase : Any = parent
__lowercase : List[str] = batch_size
__lowercase : Optional[Any] = min_seq_length
__lowercase : Dict = max_seq_length
__lowercase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase : Dict = feature_size
__lowercase : int = padding_value
__lowercase : int = sampling_rate
__lowercase : Optional[int] = do_normalize
__lowercase : Union[str, Any] = num_mel_bins
__lowercase : str = hop_length
__lowercase : str = win_length
__lowercase : List[str] = win_function
__lowercase : Union[str, Any] = fmin
__lowercase : Optional[int] = fmax
__lowercase : Optional[Any] = mel_floor
__lowercase : Union[str, Any] = return_attention_mask
def snake_case ( self : Tuple ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def snake_case ( self : Tuple , lowercase__ : Optional[int]=False , lowercase__ : str=False ):
def _flatten(lowercase__ : Optional[Any] ):
return list(itertools.chain(*lowercase__ ) )
if equal_length:
__lowercase : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__lowercase : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase : str = [np.asarray(lowercase__ ) for x in speech_inputs]
return speech_inputs
def snake_case ( self : Optional[int] , lowercase__ : int=False , lowercase__ : Any=False ):
if equal_length:
__lowercase : Any = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase : Optional[Any] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase : List[str] = [np.asarray(lowercase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
(style_context_codestyle: 575, label: 1)
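A minimal end-to-end call of the extractor exercised above (a sketch with fake audio; default constructor settings assumed):

```python
import numpy as np
from transformers import SpeechT5FeatureExtractor

feature_extractor = SpeechT5FeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)  # one second of noise at 16 kHz
features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(features.input_values.shape)
```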
"""k-nearest-neighbours classification on the iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify point by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
(code_codestyle: 665)
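The `k` parameter trades locality against noise-robustness; a quick sweep over the split trained above (assumes the module-level names just defined):

```python
# Larger k smooths over outliers but can blur class boundaries.
for k in (1, 3, 5, 9):
    print(k, classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3], k=k))
```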
"""Jacobi iteration method for solving a strictly diagonally dominant linear system."""
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Return an approximate solution of coefficient_matrix @ x = constant_matrix."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry strictly exceeds the sum of the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
(style_context_codestyle: 665, label: 1)
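A worked call of the Jacobi solver (hypothetical inputs; the matrix is strictly diagonally dominant, as the function requires):

```python
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
# Ten sweeps from a rough initial guess.
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=10))
```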
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
(code_codestyle: 9)
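Both calls above bracket the positive root of 10 - x^2, so each converges to within the 0.01 bracket tolerance of sqrt(10) ≈ 3.1623:

```python
import math

# The loop halves the bracket until it is narrower than 0.01, so the returned
# midpoint is within 0.01 of the true root.
assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01
assert abs(bisection(-2, 5) - math.sqrt(10)) < 0.01
```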
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if pattern occurs in text."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Build the KMP failure array: length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
(style_context_codestyle: 547, label: 0)
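One more failure-array walk-through, on a pattern chosen so the prefix "ab" recurs:

```python
# "abab": no border at "a" or "ab"; "aba" has border "a"; "abab" has border "ab".
assert get_failure_array("abab") == [0, 0, 1, 2]
```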
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are 1 if a token is special (eos, pad, ...) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
(code_codestyle: 516)
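A short usage sketch of the tokenizer defined above (assumes network access to the google/pegasus-xsum checkpoint):

```python
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS was pretrained with gap-sentence generation.").input_ids
print(tokenizer.convert_ids_to_tokens(ids))
```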
from ..utils import DummyObject, requires_backends


# NOTE: the source module defines one stub like this per torch-backed class; the
# obfuscated copy gave every stub the same name, so a generic placeholder is used.
class DummyTorchObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Ten further class stubs followed here, byte-for-byte identical to the one
# above (their distinct original names did not survive the obfuscation).
# Seven module-level stub functions followed, identical up to their mangled
# annotations; one representative is kept under a placeholder name.
def dummy_torch_function(*args, **kwargs):
    requires_backends(dummy_torch_function, ["torch"])
# Twelve more identical class stubs followed (the excerpt breaks off inside the
# last one); as above, their original names were lost to the obfuscation.
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
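# Added background sketch: every class above follows the "dummy object" pattern
# used when an optional backend (here: torch) is missing. The code below is a
# self-contained reimplementation for illustration only; the underscore-prefixed
# names (_DummyObject, _requires_backends, _ExamplePipeline) are stand-ins for
# the real library utilities, not their exact code.
import importlib.util


def _is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None


def _requires_backends(obj, backends):
    # Raise a readable ImportError naming the object and the missing backends.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [backend for backend in backends if backend == "torch" and not _is_torch_available()]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s), which are not installed.")


class _DummyObject(type):
    # Metaclass: any class-level attribute access fails fast when torch is absent.
    def __getattr__(cls, key):
        _requires_backends(cls, cls._backends)


class _ExamplePipeline(metaclass=_DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _requires_backends(self, self._backends)

    @classmethod
    def from_config(cls, *args, **kwargs):
        _requires_backends(cls, cls._backends)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        _requires_backends(cls, cls._backends)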
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # Rename a PyTorch parameter key to its Flax equivalent and reshape the
    # tensor when the two frameworks disagree on layout.
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch stores (out, in, kh, kw), Flax expects (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PyTorch stores (out, in), Flax expects (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
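# Quick demonstration (added) of what rename_key does to indexed module paths:
# every "<name>.<digit>" segment is rewritten with an underscore, which is how
# Flax flattens lists of submodules.
#   rename_key("down_blocks.0.attentions.1.proj_in.weight")
#   -> "down_blocks_0.attentions_1.proj_in.weight"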
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
__lowerCamelCase : Dict = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
__lowerCamelCase : Dict = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowerCamelCase : List[Any] = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowerCamelCase : Optional[int] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
__lowerCamelCase : Dict = st.sidebar.checkbox("""Demo options""")
if demo_options:
__lowerCamelCase : Tuple = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
__lowerCamelCase : Optional[Any] = action_list.index(action_st)
__lowerCamelCase : int = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
__lowerCamelCase : List[Any] = show_type == """Show full text of passages"""
else:
__lowerCamelCase : Any = 3
__lowerCamelCase : str = True
__lowerCamelCase : Optional[Any] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
__lowerCamelCase : Any = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
__lowerCamelCase : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
__lowerCamelCase : int = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
__lowerCamelCase : Optional[int] = """wiki40b"""
__lowerCamelCase : Optional[Any] = """dense"""
__lowerCamelCase : int = """beam"""
__lowerCamelCase : Optional[Any] = 2
__lowerCamelCase : Any = 64
__lowerCamelCase : List[str] = 256
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : int = None
__lowerCamelCase : Any = st.sidebar.checkbox("""Generation options""")
if generate_options:
__lowerCamelCase : Optional[Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
__lowerCamelCase : Optional[Any] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
__lowerCamelCase : Optional[Any] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowerCamelCase : List[str] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowerCamelCase : Optional[Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowerCamelCase : List[str] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowerCamelCase : str = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowerCamelCase : Any = None
# start main text
__lowerCamelCase : Any = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
__lowerCamelCase : Dict = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowerCamelCase : Optional[Any] = st.text_input("""Enter your question here:""", """""")
else:
__lowerCamelCase : List[str] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowerCamelCase , __lowerCamelCase : Tuple = make_support(question, source=wiki_source, method="""dense""", n_results=10)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
__lowerCamelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowerCamelCase : List[str] = support_list[:10]
__lowerCamelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowerCamelCase , __lowerCamelCase : List[str] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
__lowerCamelCase : List[str] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
__lowerCamelCase : str = res[1].strip()
if sec_titles == "":
__lowerCamelCase : Union[str, Any] = """[{}]({})""".format(res[0], wiki_url)
else:
__lowerCamelCase : List[str] = sec_titles.split(""" & """)
__lowerCamelCase : Dict = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
__lowerCamelCase : Optional[Any] = find_nearest_training(question)
__lowerCamelCase : Optional[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
__lowerCamelCase : Union[str, Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
__lowerCamelCase : List[str] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
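# Background sketch (added): the dense retriever above reduces to "embed the
# question, then run max-inner-product search in a FAISS index". A toy version
# with random 128-d vectors, kept as comments so it does not run inside the app:
#
#   import faiss
#   import numpy as np
#
#   reps = np.random.rand(1000, 128).astype("float32")
#   index = faiss.IndexFlatIP(128)
#   index.add(reps)
#   scores, ids = index.search(np.random.rand(1, 128).astype("float32"), 10)
#   # scores/ids hold the top-10 passages ranked by dot product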
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
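# Added sanity check: the ugly-number sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12.
assert [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]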
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(2_00) = }""")
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
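# Usage sketch (added for illustration; names follow the class defined above,
# kept as comments because it is not part of the original module):
#   template = AutomaticSpeechRecognition(transcription_column="text")
#   aligned = template.align_with_features(
#       Features({"audio": Audio(sampling_rate=16_000), "text": Value("string")})
#   )
#   aligned.column_mapping  # -> {"audio": "audio", "text": "transcription"}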
import math
class Graph:
    def __init__(self, n: int = 0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u: int, v: int, w: int):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
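    # Added reachability note: after floyd_warshall(), dp[u][v] stays math.inf
    # exactly when v cannot be reached from u, so the same O(n^3) pass answers
    # reachability queries for free.
    print(graph.show_min(0, 3) != math.inf)  # True: 0 -> 2 -> 3 has weight 16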
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier solved through Wolfe's dual problem."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
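    # Added toy usage sketch: fit a linear SVC on two separable points and
    # classify a new observation (values chosen purely for illustration).
    xs = [np.asarray([1.0, 1.0]), np.asarray([-1.0, -1.0])]
    ys = np.asarray([1.0, -1.0])
    svc = SVC(regularization=10.0, kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([2.0, 2.0])))  # expected: 1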
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
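# Background sketch (added): the clustering above groups files whose token-set
# Jaccard similarity is >= 0.85; MinHash signatures merely approximate this
# exact set overlap. A dependency-free version of the underlying check:
def _exact_jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)


# _exact_jaccard("a " * 20, "a " * 30) == 1.0, which is why the first two test
# repos end up in the same duplicate cluster.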
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
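# End-to-end usage sketch (added; mirrors what the tests above exercise, but
# with a real checkpoint, so it needs network access - kept as comments):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> a BatchEncoding with input_ids, attention_mask and pixel_values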
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
lowerCamelCase__ = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
)
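# A minimal sketch of invoking this converter from the shell; the file name and
# paths below are hypothetical placeholders, not artifacts of this script:
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention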
| 122
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
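# A sketch of how this script is typically launched (the file name is an
# assumption, not part of the script itself):
#   python tracking_example.py --with_tracking
#   accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking
# The second form picks up whatever distributed configuration `accelerate config`
# has stored, so the same script covers single-GPU, multi-GPU, and TPU runs.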
| 261
| 0
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
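# Illustrative result on the module-level `grid` above (coordinates are (y, x)):
# BreadthFirstSearch((0, 0), (0, 1)).search() returns the two-step shortest
# path [(0, 0), (0, 1)]; when the target is unreachable, search() falls back
# to a list holding only the start position.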
| 565
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
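# A minimal sketch of running this converter; the script file name and all
# paths are hypothetical placeholders:
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso.bin \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir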
| 565
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
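# With `_LazyModule` installed in `sys.modules`, a statement such as
# `from transformers.models.fnet import FNetModel` resolves the name through
# `_import_structure` and only imports the heavy torch-backed submodule on
# first access, which keeps the top-level `import transformers` fast.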
| 479
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698
| 0
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
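# Why the permutes above: Keras stores a Conv2D kernel as (H, W, in, out) and a
# depthwise kernel as (H, W, channels, multiplier), while PyTorch expects
# (out, in, H, W); plain dense "kernel" matrices only need a transpose.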
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
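# Example conversion run (flags as defined above; the script file name and
# output path are arbitrary placeholders):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model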
| 701
|
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
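# Background for the counting step above (Project Euler 86): for a cuboid
# a x b x c with a <= b <= c, the shortest surface route has length
# sqrt((a + b)**2 + c**2), so for each integer-length route the loop counts
# how many (a, b) pairs share the same a + b = sum_shortest_sides.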
| 627
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 551
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Measure a single qubit on the Aer simulator and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
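# Since no gates are applied before the measurement, the qubit stays in |0>,
# so on the ideal simulator the printed counts collapse to a single outcome,
# {'0': 1000}.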
| 447
| 0
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
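# Example of the mapping above (illustrative): the original key
# "image_encoder.patch_embed.proj.weight" becomes
# "vision_encoder.patch_embed.projection.weight".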
def convert_sam_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 711
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
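# Illustrative use: a triangle graph (every vertex adjacent to the other two)
# needs three colors, so with only two the solver backtracks to failure:
#   triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   color(triangle, 3)  # -> [0, 1, 2]
#   color(triangle, 2)  # -> []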
| 691
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node):
    if node is None:
        return 0
    return node.get_height()


def my_max(a, b):
    if a > b:
        return a
    return b
def left_rotation(node):
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def right_rotation(node):
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node):
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node):
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node, data):
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test():
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
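# A rough sanity check on the demo above: an AVL tree with n nodes has height
# O(log n); for the 10 keys inserted here the printed level traversal never
# exceeds height 4, since a height-5 AVL tree needs at least 12 nodes.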
| 21
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, emb_loss, info = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
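# A minimal usage sketch for these helpers; the device handling and paths are
# hypothetical placeholders:
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   vqgan = load_vqgan(device, "./model_checkpoints/vqgan_only.yaml",
#                      "./model_checkpoints/vqgan_only.pt")
#   xrec = reconstruct_with_vqgan(images.to(device), vqgan)  # images: (B, 3, H, W) in [-1, 1]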
| 21
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def A__ ( self ):
if not self.has_attentions:
pass
else:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = True
for model_class in self.all_model_classes:
_A : str = self.model_tester.seq_length - self.model_tester.num_masks
_A : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_A : Optional[int] = True
_A : List[Any] = False
_A : List[str] = True
_A : Optional[int] = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(A__ ,A__ ) )
_A : Dict = outputs.attentions
self.assertEqual(len(A__ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A : Optional[Any] = True
_A : Dict = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(A__ ,A__ ) )
_A : Dict = outputs.attentions
self.assertEqual(len(A__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
_A : Union[str, Any] = len(A__ )
# Check attention is always last and order is fine
_A : Any = True
_A : str = True
_A : int = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(A__ ,A__ ) )
self.assertEqual(out_len + 1 ,len(A__ ) )
_A : Optional[int] = outputs.attentions
self.assertEqual(len(A__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def A__ ( self ):
def check_hidden_states_output(A__ ,A__ ,A__ ):
_A : Optional[int] = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_A : Union[str, Any] = model(**self._prepare_for_class(A__ ,A__ ) )
_A : Union[str, Any] = outputs.hidden_states
_A : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A__ ) ,A__ )
_A : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
_A : int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = True
check_hidden_states_output(A__ ,A__ ,A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Dict = True
check_hidden_states_output(A__ ,A__ ,A__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A__ ( self ):
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def A__ ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A__ ( self ):
_A : Tuple = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
A__ )
_A : List[Any] = self.default_image_processor
_A : str = prepare_video()
_A : Any = image_processor(A__ ,return_tensors='''pt''' ).to(A__ )
# forward pass
with torch.no_grad():
_A : Any = model(**A__ )
# verify the logits
_A : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,A__ )
_A : List[str] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A__ ,atol=1E-4 ) )
@slow
def A__ ( self ):
_A : List[str] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(A__ )
_A : List[str] = self.default_image_processor
_A : Optional[int] = prepare_video()
_A : List[str] = image_processor(A__ ,return_tensors='''pt''' ).to(A__ )
# add boolean mask, indicating which patches to mask
_A : Optional[int] = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''' )
_A : Optional[int] = torch.load(A__ )
# forward pass
with torch.no_grad():
_A : Any = model(**A__ )
# verify the logits
_A : int = torch.Size([1, 1408, 1536] )
_A : Tuple = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ,device=A__ )
self.assertEqual(outputs.logits.shape ,A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,A__ ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_A : List[Any] = torch.tensor([0.51_42] ,device=A__ )
self.assertTrue(torch.allclose(outputs.loss ,A__ ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_A : List[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ,norm_pix_loss=A__ ).to(
A__ )
with torch.no_grad():
_A : List[str] = model(**A__ )
        _A : Dict = torch.tensor([0.64_69] ,device=A__ )
self.assertTrue(torch.allclose(outputs.loss ,A__ ,atol=1E-4 ) )
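# Illustrative arithmetic (not from the original file): the expected logits shape
# [1, 1408, 1536] above follows from each masked patch being reconstructed as a
# flattened tubelet of pixels, 3 (RGB) * 2 (tubelet_size) * 16**2 (patch_size**2) = 1536,
# while 1408 is the number of masked positions in the downloaded bool_masked_pos.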
| 332
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
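# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head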
| 332
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
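# Minimal check (illustrative, not part of the library file) of the affine identities
# encoded above: E[aX + b] = a E[X] + b and Var[aX + b] = a**2 Var[X].
def _demo_affine_transformed() -> None:
    base = Normal(torch.tensor(0.0), torch.tensor(1.0))
    dist = AffineTransformed(base, loc=torch.tensor(2.0), scale=torch.tensor(3.0))
    assert torch.isclose(dist.mean, torch.tensor(2.0))      # 3 * 0 + 2
    assert torch.isclose(dist.variance, torch.tensor(9.0))  # 3**2 * 1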
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
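# Quick numeric check (illustrative): squareplus maps any real number to a strictly
# positive one and approaches max(x, 0) for large |x|, e.g. f(0) = 1.0.
def _demo_squareplus() -> None:
    x = torch.tensor([-4.0, 0.0, 4.0])
    y = DistributionOutput.squareplus(x)
    assert (y > 0).all() and torch.isclose(y[1], torch.tensor(1.0))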
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
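# Note (illustrative): shifting the logits by log(scale) above multiplies the mean of
# the negative binomial, total_count * exp(logits), by `scale` without breaking its
# integer-valued support -- which wrapping it in an AffineTransformed would do.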
| 397
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        tokens_to_keep = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, tokens_to_keep)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(tokens_to_keep))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, tokens_to_keep)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 397
| 1
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
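# Example round trip (illustrative): the cipher map is a bijection over A-Z, so
# deciphering an enciphered message restores the (upper-cased) original.
def _demo_round_trip() -> None:
    cipher_map = create_cipher_map("college")
    assert decipher(encipher("HELLO", cipher_map), cipher_map) == "HELLO"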
| 719
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 455
| 0
|
UpperCAmelCase_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
UpperCAmelCase_ = ["a", "b", "c", "d", "e"]
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> Any:
"""simple docstring"""
_UpperCAmelCase = start
# add current to visited
visited.append(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# if all neighbors visited add current to sort
sort.append(SCREAMING_SNAKE_CASE_ )
# if all vertices haven't been visited select a new one to visit
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
for vertice in vertices:
if vertice not in visited:
_UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort("a", [], [])
print(sort)
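    # Note (illustrative): the list above is in *reverse* topological order, since each
    # vertex is appended only after all of its descendants -- this run prints
    # ['c', 'd', 'e', 'b', 'a']. Reverse it for a conventional ordering:
    print(sort[::-1])  # ['a', 'b', 'e', 'd', 'c']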
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =(DEISMultistepScheduler,)
SCREAMING_SNAKE_CASE_ : List[str] =(("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ):
UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase = scheduler.timesteps[5]
UpperCamelCase = scheduler.timesteps[6]
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , algorithm_type='deis' , solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
UpperCamelCase = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.full_loop()
UpperCamelCase = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def __lowerCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.full_loop(prediction_type='v_prediction' )
UpperCamelCase = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
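# Illustrative note (not part of the test file): the multistep schedulers above share
# a config format, so one can hop between them via `from_config`, e.g.
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
#   scheduler = DEISMultistepScheduler.from_config(scheduler.config)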
| 282
| 0
|
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
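# Why stepping by 6 above suffices (illustrative): every prime p > 3 has
# p % 6 in {1, 5}, since any other residue would make p divisible by 2 or 3.
def _demo_six_k_plus_minus_one() -> None:
    assert all(p % 6 in (1, 5) for p in range(5, 100) if is_prime(p))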
| 590
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
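# Minimal usage sketch (illustrative values; relies on the attribute_map above):
#   config = TrOCRConfig(d_model=256, decoder_layers=4)
#   config.hidden_size        # -> 256
#   config.num_hidden_layers  # -> 4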
| 590
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
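# Illustrative: for the default task, the inputs property above resolves to
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# while the "multiple-choice" task inserts an extra "choice" axis at position 1.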
| 1
|
'''simple docstring'''
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
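# Example (illustrative): the maximum sum over non-empty contiguous sub-arrays of
# "1,-2,3,5,-1" is 3 + 5 = 8.
def _demo_sub_array() -> None:
    assert SubArray("1,-2,3,5,-1").solve_sub_array() == 8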
| 394
| 0
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
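# Quick check of the padding rule above (illustrative): sizes are always rounded *up*
# past the current value, so an exact multiple still gains one extra block.
def _demo_padded_size(old: int, size: int = 8) -> int:
    return (old // size + 1) * size


assert _demo_padded_size(10) == 16 and _demo_padded_size(16) == 24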
| 660
|
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs):
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = SeqaSeqDataset(tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs)
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs)
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate")
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3")
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.")
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs)
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_results_json = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_results_json}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_results_json)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
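# Example launch command (illustrative only: the script filename, GPU count and data
# paths below are placeholders, not taken from this file):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --save_dir gen_out --data_dir xsum --fp16
# Unrecognized flags such as --num_beams=4 are forwarded to model.generate via
# parse_numeric_n_bool_cl_kwargs.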
| 660
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
                }
            ),
        )
    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
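# Minimal usage sketch (not in the original file): instantiating the metric class
# directly is assumed to behave like any local `datasets.Metric`; inputs are token lists.
if __name__ == "__main__":
    google_bleu = GoogleBleu()
    hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
    reference = ["the", "cat", "sat", "on", "the", "mat"]
    print(google_bleu.compute(predictions=[hypothesis], references=[[reference]]))  # {'google_bleu': 1.0}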
| 387
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='''train''', **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=5_1_2, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch['''input_ids'''].ne(pad).sum(1).tolist()
            tgt_lens = batch['''labels'''].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='''val''', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
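# Example invocation through python-fire (illustrative; the file name and data paths
# are placeholders):
#   python save_len_file.py --tokenizer_name t5-small --data_dir cnn_dm
# This pickles per-example length lists next to the train/val datasets so the
# SeqaSeqDataset sortish sampler can build dynamic batches without re-tokenizing.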
| 387
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    freeze_encoder: bool = field(default=False, metadata={'''help''': '''Whether to freeze the encoder.'''} )
    freeze_embeds: bool = field(default=False, metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    task: Optional[str] = field(
        default='''summarization''', metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''}, )
    max_source_length: Optional[int] = field(
        default=1_024, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total sequence length for target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            '''help''': (
                '''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded. '''
                '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
                '''during ``evaluate`` and ``predict``.'''
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            '''help''': (
                '''The maximum total sequence length for test target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'''help''': '''# training examples. -1 means use all.'''} )
    n_val: Optional[int] = field(default=-1, metadata={'''help''': '''# validation examples. -1 means use all.'''} )
    n_test: Optional[int] = field(default=-1, metadata={'''help''': '''# test examples. -1 means use all.'''} )
    src_lang: Optional[str] = field(default=None, metadata={'''help''': '''Source language id for translation.'''} )
    tgt_lang: Optional[str] = field(default=None, metadata={'''help''': '''Target language id for translation.'''} )
    eval_beams: Optional[int] = field(default=None, metadata={'''help''': '''# num_beams to use for evaluation.'''} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''}, )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for one split."""
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics, os.path.join(output_dir, F"""{split}_results.json""" ) )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s" , training_args )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 32
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 32
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__(self, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=2_24, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=5_12, decoder_num_hidden_layers=8, decoder_intermediate_size=20_48, mask_ratio=0.75, norm_pix_loss=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
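# Usage sketch (not part of the original module): construct a config with the
# defaults above and override one decoder field.
if __name__ == "__main__":
    config = ViTMAEConfig(decoder_num_hidden_layers=4)
    print(config.model_type, config.hidden_size, config.decoder_num_hidden_layers)  # vit_mae 768 4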
| 423
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None)
        sampling_rate = kwargs.pop("""sampling_rate""" , None)
        text = kwargs.pop("""text""" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("""input_features""" , None)
        labels = kwargs.pop("""labels""" , None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
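# Usage sketch (commented out: the checkpoint name is a common public one but still an
# assumption, and loading it needs network access; `raw_waveform` is a placeholder):
#   processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_waveform, sampling_rate=16_000, return_tensors="pt")
#   inputs_with_labels = processor(audio=raw_waveform, sampling_rate=16_000, text="HELLO WORLD")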
| 423
| 1
|
'''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using the union-by-rank heuristic; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
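# Usage sketch (not in the original file): three singleton sets; merging 0 and 1
# grows the largest set to size 2 and gives both elements the same root.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 1, 1])
    disjoint_set.merge(0, 1)
    print(disjoint_set.max_set, disjoint_set.get_parent(0) == disjoint_set.get_parent(1))  # 2 True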
| 714
|
'''simple docstring'''
from __future__ import annotations
_lowercase = """Muhammad Umer Farooq"""
_lowercase = """MIT"""
_lowercase = """1.0.0"""
_lowercase = """Muhammad Umer Farooq"""
_lowercase = """contact@muhammadumerfarooq.me"""
_lowercase = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split('.' )[-2:] )
def get_sub_domain_name(url: str) -> str:
    """Return the sub-domain name (sub.example.com)."""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl `url`, follow its anchors, and collect e-mail addresses on the same domain."""
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("""https://github.com""")
print(f'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
| 427
| 0
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , '''rb''' ) as f:
        model_weights = pickle.load(f )['''weights''']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 512
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}' , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoFeatureExtractor:
    """
    Generic feature extractor class that is instantiated via `AutoFeatureExtractor.from_pretrained(...)`.
    This class cannot be instantiated directly.
    """
    def __init__(self):
        raise EnvironmentError(
            '''AutoFeatureExtractor is designed to be instantiated '''
            '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''')
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('''config''' , None)
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None)
        kwargs["_from_auto"] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs)
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None)
            if hasattr(config , '''auto_map''') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code)
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs)
            _ = kwargs.pop('''code_revision''' , None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict , **kwargs)
        raise ValueError(
            F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
            F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
            F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}')
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class)
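# Usage sketch (commented out: the checkpoint name is an assumption and resolving it
# needs network access):
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# A custom extractor can also be registered for a custom config class:
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)  # both names hypothetical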
| 512
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 709
|
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing(tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
assert parsed_imports == ["os"]
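# Hedged sketch (illustration only, not part of the original test module):
# get_imports() reports the hard dependencies of a dynamic module; imports guarded
# by try/except around ImportError are treated as optional and filtered out, which
# is why every parametrized case above resolves to just ["os"].
def _demo_get_imports(tmp_dir="/tmp"):  # "/tmp" is an assumed scratch location
    demo_path = os.path.join(tmp_dir, "demo_imports.py")
    with open(demo_path, "w") as f:
        f.write(TOP_LEVEL_TRY_IMPORT)
    return get_imports(demo_path)  # -> ["os"]; the guarded `bar` import is dropped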
| 694
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig( PretrainedConfig ):
    model_type = 'distilbert'
    attribute_map = {
        'hidden_size': 'dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
    }
    def __init__( self , vocab_size=3_05_22 , max_position_embeddings=5_12 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=7_68 , hidden_dim=4 * 7_68 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.0_2 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
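    # Hedged sketch (illustration only, not part of the original config module):
    # the attribute_map above aliases the canonical HF names onto DistilBERT's own
    # names, so both spellings read the same value:
    #
    #   config = DistilBertConfig(dim=768, n_heads=12)
    #   assert config.hidden_size == config.dim == 768
    #   assert config.num_attention_heads == config.n_heads == 12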
class DistilBertOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 276
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig( PretrainedConfig ):
    model_type = 'biogpt'
    def __init__( self , vocab_size=4_23_84 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
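    # Hedged sketch (illustration only): like any PretrainedConfig subclass, the
    # config above round-trips through its dict serialization:
    #
    #   config = BioGptConfig(num_hidden_layers=2)
    #   assert BioGptConfig.from_dict(config.to_dict()).num_hidden_layers == 2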
| 276
| 1
|
import math
def is_prime(number: int ) -> bool:
    """Checks whether a number is prime by trial division up to sqrt(number)."""
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
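# Minimal check (added for illustration): trial division only needs odd divisors
# up to sqrt(n), so 97 is confirmed prime after testing just 3, 5, 7 and 9.
def _demo_is_prime():
    assert is_prime(2) and is_prime(3) and is_prime(97)
    assert not is_prime(1) and not is_prime(96)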
def next_prime(value , factor=1 , **kwargs ) -> int:
    """Finds the nearest prime strictly beyond factor * value, stepping by +1 (or -1 with desc=True)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
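# Minimal usage sketch (added for illustration): the search starts at
# factor * value and, if that number is itself prime, restarts just past it, so
# the result is always strictly beyond the starting point.
def _demo_next_prime():
    assert next_prime(10) == 11  # first prime above 10
    assert next_prime(13) == 17  # 13 is prime itself, so the strictly-next prime wins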
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    '''Abstract base class for CLI subcommands.'''
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        raise NotImplementedError()
| 55
| 0
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __a ( unittest.TestCase ):
"""simple docstring"""
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs" )
@classmethod
    def setUpClass( cls ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass( cls ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
    def test_accelerate_launch( self ):
        '''simple docstring'''
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() )
    def test_config_compatibility( self ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ["""--config_file""", str(config ), self.test_file_path] ,env=os.environ.copy() )
    def test_accelerate_test( self ):
'''simple docstring'''
execute_subprocess_async(["""accelerate""", """test"""] ,env=os.environ.copy() )
class __a ( unittest.TestCase ):
"""simple docstring"""
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,output ,)
    def test_base_backward_compatibility( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/0_12_0.yaml""",
                """--command""",
                self.command,
                """--tpu_zone""",
                self.tpu_zone,
                """--tpu_name""",
                self.tpu_name,
                """--debug""",
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,output ,)
    def test_with_config_file( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] ,return_stdout=True )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_with_config_file_and_command( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,output ,)
    def test_with_config_file_and_multiple_command( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/latest.yaml""",
                """--command""",
                self.command,
                """--command""",
                """echo \"Hello World\"""",
                """--debug""",
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,output ,)
    def test_with_config_file_and_command_file( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_with_config_file_and_command_file_backward_compatibility( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/0_12_0.yaml""",
                """--command_file""",
                self.command_file,
                """--tpu_zone""",
                self.tpu_zone,
                """--tpu_name""",
                self.tpu_name,
                """--debug""",
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_accelerate_install( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_accelerate_install_version( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/latest.yaml""",
                """--install_accelerate""",
                """--accelerate_version""",
                """12.0.0""",
                """--debug""",
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
| 151
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"""<unk>""" )
        self.assertEqual(vocab_keys[1] ,"""<s>""" )
        self.assertEqual(vocab_keys[-1] ,"""j""" )
        self.assertEqual(len(vocab_keys ) ,1_0_0_0 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0 )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,rust_tokens )
        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )
    def test_padding( self ,max_length=1_5 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError ,tokenizer_r.encode ,s ,max_length=max_length ,padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,s ,max_length=max_length ,padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,s2 ,max_length=max_length ,padding="""max_length""" ,)
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode ,p ,max_length=max_length ,padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,p ,max_length=max_length ,padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,p2 ,max_length=max_length ,padding="""max_length""" ,)
def __A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ,)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
@cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = """Hello World!"""
        original_tokenizer_encodings = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
        self.assertListEqual(original_tokenizer_encodings ,self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        '''simple docstring'''
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
        self.assertListEqual(original_tokenizer_encodings ,self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        '''simple docstring'''
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence ,return_tensors="""pt""" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] ,return_tensors="""pt""" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["""input_ids"""].shape
        model = ReformerModel(config )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        expected_encoding = {"""input_ids""": [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,model_name="""google/reformer-crime-and-punishment""" ,revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" ,padding=False ,sequences=sequences ,)
| 151
| 1
|
'''simple docstring'''
def ugly_numbers(n: int ) -> int:
    """Returns the nth (1-indexed) ugly number, whose only prime factors are 2, 3 and 5."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 ,n ):
        next_num = min(next_2 ,next_3 ,next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
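# Minimal check (added for illustration): the three pointers merge the 2-, 3- and
# 5-multiples of earlier ugly numbers, so the sequence starts 1, 2, 3, 4, 5, 6, 8, ...
def _demo_ugly_numbers():
    assert [ugly_numbers(i) for i in range(1, 8)] == [1, 2, 3, 4, 5, 6, 8]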
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 506
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config ,base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
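# Hedged sketch (illustration only, not part of the original script): for layer 0
# the list above starts with pairs such as
#   ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
# and, with base_model=True, the trailing block strips the "vit." prefix again,
# leaving target keys like "encoder.layer.0.layernorm_before.weight".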
def read_in_q_k_v(state_dict ,config ,base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
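# Hedged note (illustration only): timm stores Q, K and V as one fused
# (3 * hidden_size, hidden_size) qkv matrix; the slices above cut it into three
# (hidden_size, hidden_size) blocks, e.g. rows 0-383 / 384-767 / 768-1151 for the
# s16 variant with hidden_size=384.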
def remove_classification_head_(state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def remove_projection_head(state_dict ):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key(dct ,old ,new ):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url ,pytorch_dump_folder_path ):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id ,filename ) ,"""r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="""cpu""" )["""target_encoder"""]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config ,base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url ,stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size ,image_mean=IMAGENET_DEFAULT_MEAN ,image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image ,return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
    else:
        expected_slice = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] ,expected_slice ,atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 506
| 1
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int ) -> list:
    """Sieve of Eratosthenes: returns all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(base: int = 800800 , degree: int = 800800 ) -> int:
    """Project Euler 800: counts hybrid-integers p^q * q^p not exceeding base^degree."""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
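# Minimal check (added for illustration; the pair count was verified by hand):
# with base=8, degree=8 the bound is 8**8 = 2**24, which admits exactly the seven
# hybrid-integers built from (2,3), (2,5), (2,7), (2,11), (2,13), (3,5) and (3,7).
def _demo_solution():
    assert solution(8, 8) == 7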
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 88
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ) -> None:
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
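# Hedged usage sketch (illustrative paths, not real checkpoints):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output
#
# With --gpt2_config_file omitted, a default GPT2Config() is used.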
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 720
|
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = '''sshleifer/student_marian_en_ro_6_1'''
MBART_TINY = '''sshleifer/tiny-mbart'''
@require_torch
class TestTrainerExt( TestCasePlus ):
    def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , 'trainer_state.json' ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'] , float )
            assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self : Dict ) -> Any:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self : str ) -> List[Any]:
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
        self.run_seqaseq_quick(distributed=True )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        self.run_seqaseq_quick(distributed=True , extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : str ) -> List[Any]:
        self.run_seqaseq_quick(distributed=True , extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : Any ) -> int:
        self.run_seqaseq_quick(distributed=True , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=False )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=False )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self : List[Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seqaseq_quick(distributed=True , extra_args_str='--fp16 --fp16_backend=apex' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica( self , experiment_id ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['extra_args_str'] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data['n_matches'] )
@slow
    def test_run_seq2seq( self ):
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , 'trainer_state.json' ) ).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seq2seq_bnb( self ):
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str ) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , 'trainer_state.json' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
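        # Hedged arithmetic sketch (illustration only), mirroring the note above:
        #   25e6 params * 8 bytes / 2**20 ≈ 190 MB   (fp32 Adam states)
        #   25e6 params * 2 bytes / 2**20 ≈  48 MB   (8-bit BNB states)
        # so the expected saving is roughly 142 MB, checked against a 120 MB margin.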
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer( self , max_len: int , model_name: str , num_train_epochs: int , learning_rate: float = 3e-3 , optim: str = "adafactor" , distributed: bool = False , extra_args_str: str = None , eval_steps: int = 0 , predict_with_generate: bool = True , do_train: bool = True , do_eval: bool = True , do_predict: bool = True , n_gpus_to_use: int = None , ):
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
            --num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
            --save_steps {str(eval_steps )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
        args_eval = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
            --eval_steps {str(eval_steps )}
'''.split()
        args_predict = '\n    --do_predict\n  '.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
else:
            testargs = ['run_translation.py'] + args
            with patch.object(sys , 'argv' , testargs ):
main()
return output_dir
| 137
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVision2Seq
    inputs = ['image']
    outputs = ['text']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode( self , image ):
        """simple docstring"""
        return self.pre_processor(images=image , return_tensors="pt" )
    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
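    # Hedged usage sketch (illustration only; requires the vision extras and a
    # local or cached copy of the default checkpoint, and "photo.jpg" is an
    # arbitrary example file):
    #
    #   from PIL import Image
    #   tool = ImageCaptioningTool()
    #   caption = tool(Image.open("photo.jpg"))  # encode -> generate -> decode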
| 453
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("""fixtures""")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
SAMPLE_CONFIG = get_tests_dir("""fixtures/dummy-config.json""")
class __a ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut( self ):
        """simple docstring"""
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_directory_from_key( self ):
        """simple docstring"""
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_directory_from_config( self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()
            config_dict.pop("feature_extractor_type" )
            config = Wav2Vec2FeatureExtractor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoFeatureExtractor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue("_processor_class" not in dict_as_saved )
            self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_file( self ):
        """simple docstring"""
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_repo_not_found( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base" )
    def test_revision_not_found( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
    def test_feature_extractor_not_found( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
    def test_from_pretrained_dynamic_feature_extractor( self ):
        """simple docstring"""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=False )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir )
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
    def test_new_feature_extractor_registration( self ):
        """simple docstring"""
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(Wav2Vec2Config , Wav2Vec2FeatureExtractor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir )
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_feature_extractor , CustomFeatureExtractor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self ):
        """simple docstring"""
        class NewFeatureExtractor( Wav2Vec2FeatureExtractor ):
            special_attribute_present = True
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" )
            self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
            self.assertTrue(not hasattr(feature_extractor , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
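# Usage sketch (a minimal illustration, assuming the `transformers` package and
# network access) of the `trust_remote_code` switch the tests above exercise:
# custom feature extractors hosted on the Hub only load when remote code
# execution is explicitly opted into.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained(
    "hf-internal-testing/test_dynamic_feature_extractor",  # repo shipping its own extractor class
    trust_remote_code=True,  # without this flag, loading raises a ValueError
)
print(type(extractor).__name__)  # -> "NewFeatureExtractor"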
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class a__ ( lowercase__ ):
snake_case_ = "ibert"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=1, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase="absolute", _UpperCAmelCase=False, _UpperCAmelCase="none", **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class a__ ( lowercase__ ):
@property
def snake_case__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
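# Usage sketch (a minimal example, assuming the `transformers` package with
# I-BERT available): the config mirrors RoBERTa's, with `quant_mode` toggling
# the integer-only inference path.
from transformers import IBertConfig, IBertModel

config = IBertConfig(quant_mode=True)          # enable integer-only quantization
model = IBertModel(config)                     # randomly initialized weights
print(config.hidden_size, config.quant_mode)   # -> 768 True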
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM runs the timestep loop twice (the second pass reuses the stored residuals)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
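# Usage sketch (a minimal, self-contained denoising loop with IPNDMScheduler,
# assuming `diffusers` and `torch`); a random tensor stands in for a real
# model's noise prediction.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample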
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
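# Usage sketch (a minimal example, assuming `transformers` with RemBert
# available): the lazy init above means submodules are only imported on first
# attribute access, so importing the package stays cheap.
from transformers import RemBertConfig

config = RemBertConfig()   # triggers the lazy import of configuration_rembert only
print(config.model_type)   # -> "rembert"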
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
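# Usage sketch (a minimal example, assuming `transformers`): configure TAPAS for
# weak-supervision aggregation, the WTQ-style setup the fine-tuning
# hyperparameters above serve.
from transformers import TapasConfig

config = TapasConfig(
    num_aggregation_labels=4,  # NONE, SUM, AVERAGE, COUNT
    aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
    use_answer_as_supervision=True,
)
print(config.aggregation_labels[1])  # -> "SUM"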
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
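# Usage sketch (assumes the `transformers` agents/tools API and a downloaded
# facebook/bart-large-mnli checkpoint; the call pattern follows PipelineTool's
# generic __call__ -> encode -> forward -> decode flow):
classifier = TextClassificationTool()
label = classifier("This is a super nice API!", labels=["positive", "negative"])
print(label)  # -> "positive"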
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_lowerCAmelCase = "."
if __name__ == "__main__":
_lowerCAmelCase = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
_lowerCAmelCase = []
_lowerCAmelCase = []
with open(doctest_file_path) as fp:
for line in fp:
_lowerCAmelCase = line.strip()
_lowerCAmelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_lowerCAmelCase = "\n".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    # decorators apply bottom-up: `file` mocks builtins.open, `sock` mocks socket.socket
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , ) -> Union[str, Any]:
super().__init__()
A__ = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = False
A__ = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
A__ = TaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , d_model=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ , feed_forward_proj=SCREAMING_SNAKE_CASE__ , is_decoder=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , )
A__ = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE__ ):
A__ = TaBlock(SCREAMING_SNAKE_CASE__ )
self.encoders.append(SCREAMING_SNAKE_CASE__ )
A__ = TaLayerNorm(SCREAMING_SNAKE_CASE__ )
A__ = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
A__ = self.token_embedder(SCREAMING_SNAKE_CASE__ )
A__ = encoder_input_tokens.shape[1]
A__ = torch.arange(SCREAMING_SNAKE_CASE__ , device=encoder_input_tokens.device )
x += self.position_encoding(SCREAMING_SNAKE_CASE__ )
A__ = self.dropout_pre(SCREAMING_SNAKE_CASE__ )
# inverted the attention mask
A__ = encoder_input_tokens.size()
A__ = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for lyr in self.encoders:
A__ = lyr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
A__ = self.layer_norm(SCREAMING_SNAKE_CASE__ )
return self.dropout_post(SCREAMING_SNAKE_CASE__ ), encoder_inputs_mask
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
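# Usage sketch (generating unconditional audio with the pipeline under test;
# assumes `diffusers`, `torch`, and network access to harmonai/maestro-150k):
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
audios = pipe(generator=torch.manual_seed(0), num_inference_steps=50, audio_length_in_s=4.096).audios
print(audios.shape)  # (batch, channels, samples)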
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
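# Usage sketch (the writer above backs `Dataset.to_json`); a minimal round trip,
# assuming the `datasets` package:
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_json("tmp_rows.json")                     # JSON Lines by default (orient="records")
print(Dataset.from_json("tmp_rows.json")["a"])  # -> [1, 2, 3]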
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
'''simple docstring'''
import os
import sys
import transformers
_UpperCamelCase : Union[str, Any] = """3"""
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
"""Circular queue implementation backed by a fixed-size list."""


class CircularQueue:
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
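# Usage example: wrap-around behavior in action; slots freed by dequeue are
# reused by enqueue.
queue = CircularQueue(3)
queue.enqueue(1).enqueue(2).enqueue(3)  # queue is now full
queue.dequeue()                         # -> 1, frees a slot at the front
queue.enqueue(4)                        # rear wraps around to index 0
print(len(queue), queue.first())        # -> 3 2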
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
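# Worked example: 145 is a fixed point of the digit-factorial map, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain has length 1; 169 enters the
# known loop 169 -> 363601 -> 1454 -> 169.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601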
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
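# Usage sketch (the task-level entry point for the pipeline above; assumes
# `transformers`, `torch`, `Pillow`, and network access; the image URL is
# illustrative):
from transformers import pipeline

detector = pipeline("object-detection")
results = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
print(results[0])  # e.g. {"score": 0.99, "label": "cat", "box": {"xmin": ..., ...}}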
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        input_features = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(input_features, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A, A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A )}.''' )
# get prompt text embeddings
lowerCamelCase : Optional[Any] = self.tokenizer(
A, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
lowerCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase : Union[str, Any] = text_embeddings.shape
lowerCamelCase : str = text_embeddings.repeat(1, A, 1 )
lowerCamelCase : str = text_embeddings.view(bs_embed * num_images_per_prompt, A, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase : List[str]
if negative_prompt is None:
lowerCamelCase : int = [''] * batch_size
elif type(A ) is not type(A ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
F''' {type(A )}.''' )
elif isinstance(A, A ):
lowerCamelCase : Any = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase : Dict = negative_prompt
lowerCamelCase : Optional[Any] = text_input_ids.shape[-1]
lowerCamelCase : Optional[int] = self.tokenizer(
A, padding='max_length', max_length=A, truncation=A, return_tensors='pt', )
lowerCamelCase : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase : List[Any] = uncond_embeddings.shape[1]
lowerCamelCase : int = uncond_embeddings.repeat(1, A, 1 )
lowerCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt, A, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase : List[Any] = torch.randn(A, generator=A, device='cpu', dtype=A ).to(
self.device )
else:
lowerCamelCase : Optional[Any] = torch.randn(A, generator=A, device=self.device, dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase : Tuple = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase : List[str] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase : str = {}
if accepts_eta:
lowerCamelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase : Optional[int] = self.scheduler.scale_model_input(A, A )
# predict the noise residual
lowerCamelCase : List[Any] = self.unet(A, A, encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase : Optional[int] = noise_pred.chunk(2 )
lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Optional[int] = self.scheduler.step(A, A, A, **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A, A, A )
lowerCamelCase : Optional[int] = 1 / 0.1_8215 * latents
lowerCamelCase : List[Any] = self.vae.decode(A ).sample
lowerCamelCase : List[str] = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase : List[Any] = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase : Dict = self.numpy_to_pil(A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=A, nsfw_content_detected=A )
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
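# Expected behavior note: completions carry a trailing space because the "#"
# terminator maps to " " in _elements. For the word set above:
#   autocomplete_using_trie("de") -> ("depart ", "detergent ", "deer ", "deal ")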
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
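# Usage sketch (a minimal example, assuming `transformers`): the attribute_map
# above lets generic code read `hidden_size` while the config stores `d_model`.
from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
print(config.hidden_size, config.num_attention_heads)  # -> 128 4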
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"config.{attribute}" in modeling_source
or f"getattr(config, \"{attribute}\"" in modeling_source
or f"getattr(self.config, \"{attribute}\"" in modeling_source
):
A_ = True
# Deal with multi-line cases
elif (
re.search(
Rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , SCREAMING_SNAKE_CASE , )
is not None
):
A_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
A_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
A_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
A_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
A_ = True
if not attribute_used:
A_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
A_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
A_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
A_ = True
elif attribute.endswith('''_token_id''' ):
A_ = True
# configuration class specific cases
if not case_allowed:
A_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
A_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
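# Hedged demo of the detection logic above on a made-up modeling snippet: the literal
# substring checks catch single-line uses, while the whitespace-tolerant regex also
# catches `getattr` calls that are split across lines. `demo_source` is hypothetical.
import re

demo_attribute = "rotary_dim"
demo_source = 'x = getattr(\n    self.config,\n    "rotary_dim",\n)'
demo_pattern = rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{demo_attribute}\""
assert f"config.{demo_attribute}" not in demo_source  # the single-line check misses it
assert re.search(demo_pattern, demo_source) is not None  # the multi-line regex finds it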
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = dict(inspect.signature(config_class.__init__ ).parameters )
A_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
A_ = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can appear under different names in the modeling files; as long
    # as one variant is used, the test should pass
A_ = {}
if len(config_class.attribute_map ) > 0:
A_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
A_ = inspect.getsourcefile(SCREAMING_SNAKE_CASE )
A_ = os.path.dirname(SCREAMING_SNAKE_CASE )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
A_ = [os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for fn in os.listdir(SCREAMING_SNAKE_CASE ) if fn.startswith('''modeling_''' )]
# Get the source code strings
A_ = []
for path in modeling_paths:
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE ) as fp:
modeling_sources.append(fp.read() )
A_ = []
for config_param, default_value in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# `attributes` here is all the variant names for `config_param`
A_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
unused_attributes.append(attributes[0] )
return sorted(SCREAMING_SNAKE_CASE )
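# A tiny, self-contained example (ToyConfig is hypothetical) of how the function above
# harvests configuration __init__ parameter names and default values via inspect.signature.
import inspect


class ToyConfig:
    def __init__(self, hidden_size=64, num_layers=2, **kwargs):
        pass


toy_signature = dict(inspect.signature(ToyConfig.__init__).parameters)
toy_names = [p for p in toy_signature if p not in ("self", "kwargs")]
toy_defaults = [toy_signature[p].default for p in toy_names]
assert toy_names == ["hidden_size", "num_layers"] and toy_defaults == [64, 2]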
def _lowerCamelCase ( ):
'''simple docstring'''
A_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
A_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda SCREAMING_SNAKE_CASE : inspect.isclass(SCREAMING_SNAKE_CASE )
and issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and inspect.getmodule(SCREAMING_SNAKE_CASE ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
A_ = check_config_attributes_being_used(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
A_ = unused_attributes
if len(SCREAMING_SNAKE_CASE ) > 0:
A_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f"{name}: {attributes}\n"
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
check_config_attributes()
| 203
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class a :
UpperCAmelCase_ : Optional[Any] =42
UpperCAmelCase_ : List[Any] =None
# Automatically constructed
UpperCAmelCase_ : Tuple ="dict"
UpperCAmelCase_ : List[Any] =None
UpperCAmelCase_ : List[Any] =field(default="Translation", init=a_, repr=a_ )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCamelCase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class a :
UpperCAmelCase_ : List[str] =None
UpperCAmelCase_ : List[Any] =None
UpperCAmelCase_ : List[str] =None
# Automatically constructed
UpperCAmelCase_ : int ="dict"
UpperCAmelCase_ : Optional[int] =None
UpperCAmelCase_ : Any =field(default="TranslationVariableLanguages", init=a_, repr=a_ )
def UpperCamelCase_ ( self ):
lowercase = sorted(set(self.languages ) ) if self.languages else None
lowercase = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = set(self.languages )
        if self.languages and set(_lowerCamelCase ) - lang_set:
            raise ValueError(
                F'Some languages in example ({", ".join(sorted(set(_lowerCamelCase ) - lang_set ) )}) are not in valid set ({", ".join(_lowerCamelCase )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase = []
for lang, text in translation_dict.items():
            if isinstance(_lowerCamelCase , _lowerCamelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
        lowercase , lowercase = zip(*sorted(_lowerCamelCase ) )
return {"language": languages, "translation": translations}
def UpperCamelCase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
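# Minimal sketch of what the two feature classes above encode, written against pyarrow
# directly so it runs independently of the dataclasses. The example dictionary is hypothetical.
import pyarrow as pa

languages = sorted({"en", "fr", "de"})
translation_type = pa.struct({lang: pa.string() for lang in languages})

# Variable-languages encoding: flatten a dict with possibly multiple translations per
# language into parallel, language-sorted lists, exactly as the method above does.
example = {"fr": ["le chat", "la chatte"], "en": "the cat"}
pairs = []
for lang, text in example.items():
    pairs.extend([(lang, text)] if isinstance(text, str) else [(lang, el) for el in text])
langs, texts = zip(*sorted(pairs))
assert langs == ("en", "fr", "fr") and texts == ("the cat", "la chatte", "le chat")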
| 713
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : int = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : Union[str, Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : Any = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
_UpperCamelCase : Any = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
_UpperCamelCase : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
_UpperCamelCase : Tuple = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
_UpperCamelCase : int = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
_UpperCamelCase : Tuple = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class a ( a_ ):
UpperCAmelCase_ : str =VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Union[str, Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : int =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class a ( a_ ):
UpperCAmelCase_ : Union[str, Any] =VOCAB_FILES_NAMES
UpperCAmelCase_ : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Dict =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Optional[int] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
_UpperCamelCase : Tuple = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
_UpperCamelCase : int = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(a_ )
class a :
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
if titles is None and texts is None:
return super().__call__(
_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
elif titles is None or texts is None:
lowercase = titles if texts is None else texts
return super().__call__(
_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
lowercase = titles if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [titles]
lowercase = texts if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [texts]
lowercase = len(_lowerCamelCase )
lowercase = questions if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [questions] * n_passages
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
                F'There should be as many titles as texts but got {len(_lowerCamelCase )} titles and {len(_lowerCamelCase )} texts.' )
lowercase = super().__call__(_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase )['input_ids']
lowercase = super().__call__(_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase )['input_ids']
lowercase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCamelCase , _lowerCamelCase )
]
}
if return_attention_mask is not False:
lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase = attention_mask
return self.pad(_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1_6 , _lowerCamelCase = 6_4 , _lowerCamelCase = 4 , ):
lowercase = reader_input['input_ids']
lowercase , lowercase , lowercase = reader_output[:3]
lowercase = len(_lowerCamelCase )
lowercase = sorted(range(_lowerCamelCase ) , reverse=_lowerCamelCase , key=relevance_logits.__getitem__ )
lowercase = []
for doc_id in sorted_docs:
lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase = sequence_ids.index(self.pad_token_id )
else:
lowercase = len(_lowerCamelCase )
lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowerCamelCase , top_spans=_lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowerCamelCase , start_index=_lowerCamelCase , end_index=_lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = []
for start_index, start_score in enumerate(_lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowercase = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _lowerCamelCase[1] , reverse=_lowerCamelCase )
lowercase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
lowercase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class a ( a_, a_ ):
UpperCAmelCase_ : Union[str, Any] =VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[Any] =READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : Optional[int] =["input_ids", "attention_mask"]
| 134
| 0
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : List[Any]=None , ) -> Optional[int]:
"""simple docstring"""
snake_case : Optional[Any] = parent
snake_case : List[Any] = batch_size
snake_case : List[str] = seq_length
snake_case : int = is_training
snake_case : Any = use_input_mask
snake_case : str = use_token_type_ids
snake_case : List[Any] = use_labels
snake_case : List[str] = vocab_size
snake_case : Tuple = hidden_size
snake_case : int = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : List[Any] = intermediate_multiple_size
snake_case : List[Any] = hidden_act
snake_case : Optional[Any] = hidden_dropout
snake_case : Tuple = attention_dropout
snake_case : Optional[int] = weight_tying
snake_case : Tuple = max_position_embeddings
snake_case : Union[str, Any] = type_vocab_size
snake_case : Tuple = type_sequence_label_size
snake_case : int = initializer_range
snake_case : Optional[int] = num_labels
snake_case : Tuple = num_choices
snake_case : Optional[Any] = scope
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Tuple = None
if self.use_input_mask:
snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Optional[Any] = None
if self.use_labels:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
snake_case ,snake_case ,snake_case ,snake_case : Any = self.prepare_config_and_inputs()
snake_case : List[str] = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
snake_case : Union[str, Any] = GPTNeoXJapaneseModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
snake_case : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case : Optional[Any] = True
snake_case : List[str] = GPTNeoXJapaneseModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
snake_case : Optional[int] = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
snake_case : Optional[int] = True
snake_case : List[Any] = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
snake_case : Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
snake_case : Any = outputs.past_key_values
        # create multiple hypothetical next tokens and extend next_input_ids with them
snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append next_tokens to input_ids and next_mask to the attention mask
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
snake_case : Optional[int] = output_from_no_past['''hidden_states'''][0]
snake_case : Optional[Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['''hidden_states'''][0]
# select random slice
snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
snake_case : List[str] = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case ,snake_case : Union[str, Any] = config_and_inputs
snake_case : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCamelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCamelCase = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
snake_case : List[Any] = GPTNeoXJapaneseModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
snake_case ,snake_case ,snake_case ,snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
snake_case ,snake_case ,snake_case ,snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
snake_case ,snake_case ,snake_case ,snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case : str = None
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
snake_case ,snake_case ,snake_case ,snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : int = '''abeja/gpt-neox-japanese-2.7b'''
snake_case : Optional[Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
snake_case : Optional[Any] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
snake_case : int = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase__ )
snake_case : Tuple = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase__ )
snake_case : List[Any] = []
for prompt in prompts:
snake_case : str = tokenizer(UpperCamelCase__ , return_tensors='''pt''' ).input_ids
snake_case : Dict = model.generate(UpperCamelCase__ , max_length=50 )
snake_case : List[Any] = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
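# A hedged, self-contained illustration of the cache-equivalence check performed in the
# decoder-past test above: run once on the full sequence, once incrementally with a cache,
# then compare the overlapping output slice. The "model" here is a toy causal prefix-sum,
# not GPT-NeoX-Japanese, so the snippet runs without any pretrained weights.
import torch


def toy_causal_model(inputs, cache_sum=None):
    # cache_sum plays the role of past_key_values: state carried over from the prefix.
    out = torch.cumsum(inputs, dim=-1)
    if cache_sum is not None:
        out = out + cache_sum.unsqueeze(-1)
    return out, out[:, -1]


full_input = torch.randn(2, 8)
prefix, next_tokens = full_input[:, :5], full_input[:, 5:]

out_no_past, _ = toy_causal_model(full_input)
_, cache = toy_causal_model(prefix)
out_with_past, _ = toy_causal_model(next_tokens, cache_sum=cache)

# Same pattern as the test: the last `next_tokens` positions must agree.
assert torch.allclose(out_no_past[:, -3:], out_with_past, atol=1e-5)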
| 638
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
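# Minimal sketch of the guarded-import pattern used above, with importlib's find_spec
# standing in for the is_*_available() helpers; OptionalDependencyNotAvailable itself
# lives in transformers.utils. Illustrative only.
import importlib.util

_demo_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
if importlib.util.find_spec("torch") is not None:
    # the modeling entry is only registered when the optional backend is importable
    _demo_import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]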
| 638
| 1
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''efficientformer'''
def __init__( self , lowerCAmelCase_ = [3, 2, 6, 4] , lowerCAmelCase_ = [48, 96, 224, 448] , lowerCAmelCase_ = [True, True, True, True] , lowerCAmelCase_ = 448 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 5 , lowerCAmelCase_ = 8 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 16 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 1E-5 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 1E-12 , lowerCAmelCase_ = 224 , lowerCAmelCase_ = 1E-05 , **lowerCAmelCase_ , ) -> None:
super().__init__(**lowerCAmelCase_ )
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = hidden_sizes
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = patch_size
_snake_case = num_channels
_snake_case = depths
_snake_case = mlp_expansion_ratio
_snake_case = downsamples
_snake_case = dim
_snake_case = key_dim
_snake_case = attention_ratio
_snake_case = resolution
_snake_case = pool_size
_snake_case = downsample_patch_size
_snake_case = downsample_stride
_snake_case = downsample_pad
_snake_case = drop_path_rate
_snake_case = num_metaad_blocks
_snake_case = distillation
_snake_case = use_layer_scale
_snake_case = layer_scale_init_value
_snake_case = image_size
_snake_case = batch_norm_eps
| 710
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''vit_msn'''
def __init__( self , lowerCAmelCase_=768 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=3072 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-06 , lowerCAmelCase_=224 , lowerCAmelCase_=16 , lowerCAmelCase_=3 , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> str:
super().__init__(**lowerCAmelCase_ )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
| 541
| 0
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase_ : Any = '3'
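# NOTE: "3" matches the TF_CPP_MIN_LOG_LEVEL value conventionally used to silence TensorFlow's C++ logging.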
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 44
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase__ : Optional[Any] = {'UserAgent': UserAgent().random}
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = script.contents[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = F'''https://www.instagram.com/{username}/'''
SCREAMING_SNAKE_CASE__ : List[str] = self.get_json()
def __magic_name__ (self ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text
SCREAMING_SNAKE_CASE__ : List[str] = BeautifulSoup(SCREAMING_SNAKE_CASE__ , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ) -> str:
"""simple docstring"""
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__(self ) -> str:
"""simple docstring"""
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def __magic_name__ (self ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def __magic_name__ (self ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowercase_ ( _snake_case = "github" ):
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE__ : List[Any] = InstagramUser(_snake_case )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,_snake_case )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : List[str] = InstagramUser('github')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 223
| 0
|
"""simple docstring"""
def lowercase__ ( lowerCAmelCase : int ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def lowercase__ ( lowerCAmelCase : int ) -> bool:
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = number
while duplicate > 0:
UpperCAmelCase , UpperCAmelCase = divmod(lowerCAmelCase , 10 )
fact_sum += factorial(lowerCAmelCase )
return fact_sum == number
if __name__ == "__main__":
    print('''Program to check whether a number is a Krishnamurthy Number or not.''')
SCREAMING_SNAKE_CASE_ = int(input('''Enter number: ''').strip())
print(
F'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
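# Worked check of the property computed above, written standalone since the names in the
# record are obfuscated: a Krishnamurthy (strong) number equals the sum of the factorials
# of its digits.
from math import factorial as fact


def digit_factorial_sum(n: int) -> int:
    return sum(fact(int(d)) for d in str(n))


assert digit_factorial_sum(145) == 145      # 1! + 4! + 5! = 1 + 24 + 120
assert digit_factorial_sum(40585) == 40585  # the largest such number in base 10
assert digit_factorial_sum(146) != 146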
| 183
|
"""simple docstring"""
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) == 0 )
def lowercase__ ( ) -> None:
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 183
| 1
|
import os
def a_ ( ):
__lowerCAmelCase = os.path.dirname(os.path.realpath(lowerCAmelCase_ ) )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'triangle.txt' )
with open(lowerCAmelCase_ ) as f:
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = []
for line in triangle:
__lowerCAmelCase = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(lowerCAmelCase_ ) )
a.append(lowerCAmelCase_ )
for i in range(1, len(lowerCAmelCase_ ) ):
for j in range(len(a[i] ) ):
__lowerCAmelCase = a[i - 1][j] if j != len(a[i - 1] ) else 0
__lowerCAmelCase = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCAmelCase_, lowerCAmelCase_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
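# Tiny worked instance of the row-by-row maximum-path logic above, using a hardcoded
# triangle instead of reading triangle.txt: each cell accumulates the better of its two
# parents, so the answer is the maximum of the last row.
triangle = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        left = triangle[i - 1][j - 1] if j > 0 else 0
        right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        triangle[i][j] += max(left, right)
assert max(triangle[-1]) == 23  # path 3 + 7 + 4 + 9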
| 53
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , A_=3 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FalconModel(config=A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ )
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconModel(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
SCREAMING_SNAKE_CASE__ = outputs.past_key_values
        # create multiple hypothetical next tokens and extend next_input_ids with them
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append next_tokens to input_ids and next_mask to the attention mask
SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
# select random slice
SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (FalconForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : List[Any] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Tuple = False
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FalconModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
SCREAMING_SNAKE_CASE__ = alibi
self.model_tester.create_and_check_model(A_ , *A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = '''single_label_classification'''
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , use_cache=A_ )
SCREAMING_SNAKE_CASE__ = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ = model._convert_to_rw_cache(result.past_key_values )
SCREAMING_SNAKE_CASE__ = model._convert_cache_to_standard_format(A_ , A_ )
for layer in range(len(A_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = '''multi_label_classification'''
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ )
SCREAMING_SNAKE_CASE__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(A_ , '''use_cache''' ):
return
SCREAMING_SNAKE_CASE__ = model_class(A_ ).to(A_ )
if "use_cache" not in inputs:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model(**A_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
SCREAMING_SNAKE_CASE__ = (
getattr(A_ , '''decoder_layers''' , A_ )
or getattr(A_ , '''num_decoder_layers''' , A_ )
or config.num_hidden_layers
)
SCREAMING_SNAKE_CASE__ = getattr(A_ , '''num_kv_heads''' , config.num_attention_heads )
SCREAMING_SNAKE_CASE__ = getattr(A_ , '''d_model''' , config.hidden_size )
SCREAMING_SNAKE_CASE__ = embed_dim // num_attention_heads
SCREAMING_SNAKE_CASE__ = outputs['''past_key_values''']
self.assertEqual(len(A_ ) , A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = inputs['''input_ids'''].shape
for i in range(A_ ):
if config.new_decoder_architecture:
SCREAMING_SNAKE_CASE__ = config.num_attention_heads
elif config.multi_query:
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
SCREAMING_SNAKE_CASE__ = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ )[0]
self.assertEqual(A_ , A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase_ ( self ):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(device=A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
# Test results are the same with and without cache
SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
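# --- Added illustration (standalone sketch; names are mine, not the model's API) of
# the cache-layout round trip the cache-conversion test above checks: Falcon's
# legacy "RW" cache fuses the batch and head dimensions into one (ndim == 3),
# while the standard format keeps them separate (ndim == 4). The real conversion
# may lay out keys differently; this sketch only shows the fused-versus-split
# dimensions that the ndim assertions verify.
import torch

def to_rw_layout(kv):
    batch, num_heads, seq_len, head_dim = kv.shape
    return kv.reshape(batch * num_heads, seq_len, head_dim)

def to_standard_layout(kv, batch):
    fused, seq_len, head_dim = kv.shape
    return kv.reshape(batch, fused // batch, seq_len, head_dim)

_kv = torch.randn(2, 4, 5, 8)  # (batch, heads, seq, head_dim)
assert to_standard_layout(to_rw_layout(_kv), batch=2).equal(_kv)  # lossless round trip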
| 100
| 0
|
from __future__ import annotations
from random import choice
def random_pivot(lst):
    '''simple docstring'''
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
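    # Added usage sketch (uses the repaired names above; values are illustrative).
    # Entries equal to the pivot are dropped by the strict </> partitions, so the
    # routine assumes distinct elements. Expected average runtime is O(n), since
    # each recursive call keeps a constant fraction of the list.
    values = [3, 1, 4, 5, 9, 2, 6]
    assert kth_number(values, 1) == 1  # smallest
    assert kth_number(values, 4) == 4  # 4th smallest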
| 718
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = ['''audio_values''', '''audio_mask''']
def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
a = spectrogram_length
a = num_channels
a = patch_size
a = feature_size // self.patch_size[1]
a = n_fft
a = sampling_rate // hop_length_to_sampling_rate
a = sampling_rate
a = padding_value
a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T
def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
"""simple docstring"""
a = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
a = log_spec[:, :-1]
a = log_spec - 2_0.0
a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
a = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowerCAmelCase ):
a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a = np.array(__lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a = padded_audio_features * self.padding_value
for i in range(len(__lowerCAmelCase ) ):
a = audio_features[i]
a = feature
# return as BatchFeature
if return_attention_mask:
a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
a = {"audio_values": padded_audio_features}
a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
return encoded_inputs
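# --- Added usage sketch (hedged): the upstream version of this class ships in
# transformers as `TvltFeatureExtractor`; with the defaults above (44.1 kHz,
# 128 mel bins) a 1-D float waveform yields padded log-mel patches plus a
# patch-level attention mask. Kept as comments since it assumes the upstream name.
#
# from transformers import TvltFeatureExtractor
# import numpy as np
# extractor = TvltFeatureExtractor()
# waveform = np.zeros(44100, dtype=np.float32)  # one second of silence
# batch = extractor(waveform, return_tensors="np", sampling_rate=44100)
# print(batch["audio_values"].shape)  # (1, 1, max_time_len, 128)
# print(batch["audio_mask"].shape)    # (1, max_patch_len)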
| 32
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
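# --- Added end-to-end sketch mirroring the integration test above (real
# checkpoint name; the exact logits depend on the downloaded weights, so this
# is kept as comments rather than executable code):
#
# from PIL import Image
# from transformers import AutoImageProcessor, TFResNetForImageClassification
# processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
# model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# inputs = processor(images=image, return_tensors="tf")
# logits = model(**inputs).logits  # shape (1, 1000)
# print(model.config.id2label[int(logits.numpy().argmax(-1)[0])])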
| 681
| 0
|
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
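    # Added sanity check: stooge sort is deliberately inefficient, running in
    # O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, worse than bubble sort.
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
    assert stooge_sort([]) == []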
| 703
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase__ = TypeVar('T')
class __snake_case ( Generic[T] ):
def __init__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Union[str, Any] = data
a__: Node[T] | None = None
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
class __snake_case ( Generic[T] ):
def __init__( self) -> None:
'''simple docstring'''
a__: Node[T] | None = None
def __iter__( self) -> Iterator[T]:
'''simple docstring'''
a__: Union[str, Any] = self.top
while node:
yield node.data
a__: Optional[Any] = node.next
def __str__( self) -> str:
'''simple docstring'''
return "->".join([str(lowercase) for item in self])
def __len__( self) -> int:
'''simple docstring'''
return len(tuple(iter(self)))
def lowerCamelCase_ ( self) -> bool:
'''simple docstring'''
return self.top is None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Any = Node(lowercase)
if not self.is_empty():
a__: str = self.top
a__: Optional[int] = node
def lowerCamelCase_ ( self) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('pop from empty stack')
assert isinstance(self.top , lowercase)
a__: Tuple = self.top
a__: List[Any] = self.top.next
return pop_node.data
def lowerCamelCase_ ( self) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('peek from empty stack')
assert self.top is not None
return self.top.data
def lowerCamelCase_ ( self) -> None:
'''simple docstring'''
a__: Any = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 217
| 0
|
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    '''simple docstring'''
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.')
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
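    # Added examples: the witness sets above make the test deterministic for
    # n below 3,317,044,064,679,887,385,961,981.
    assert miller_rabin(97)  # prime
    assert not miller_rabin(561)  # Carmichael number, correctly rejected
    assert miller_rabin(2**61 - 1)  # Mersenne prime M61, within the bound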
| 251
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Union[str, Any] = ['audio_values', 'audio_mask']
def __init__( self : Dict , a : Optional[Any]=2_048 , a : Union[str, Any]=1 , a : str=[16, 16] , a : Optional[int]=128 , a : str=44_100 , a : List[str]=86 , a : int=2_048 , a : Tuple=0.0 , **a : int , )-> Any:
"""simple docstring"""
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , **a , )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : np.array )-> np.ndarray:
"""simple docstring"""
lowercase__ = spectrogram(
a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : List[Any] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : Union[str, Any] , )-> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowercase__ = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a ):
lowercase__ = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(a ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(a ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowercase__ = {'audio_values': padded_audio_features}
lowercase__ = BatchFeature(data=a , tensor_type=a )
return encoded_inputs
| 235
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase__ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ = "ResNetConfig"
# Base docstring
UpperCAmelCase__ = "microsoft/resnet-50"
UpperCAmelCase__ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase__ = "microsoft/resnet-50"
UpperCAmelCase__ = "tiger cat"
UpperCAmelCase__ = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
def __init__( self : List[Any] , A : int , A : int , A : int = 3 , A : int = 1 , A : str = "relu") -> List[str]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Convad(
A , A , kernel_size=A , stride=A , padding=kernel_size // 2 , bias=A)
_UpperCAmelCase = nn.BatchNormad(A)
_UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def _lowerCamelCase ( self : Optional[int] , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = self.convolution(A)
_UpperCAmelCase = self.normalization(A)
_UpperCAmelCase = self.activation(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , A : ResNetConfig) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
_UpperCAmelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1)
_UpperCAmelCase = config.num_channels
def _lowerCamelCase ( self : str , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
_UpperCAmelCase = self.embedder(A)
_UpperCAmelCase = self.pooler(A)
return embedding
class __lowerCAmelCase ( nn.Module ):
def __init__( self : int , A : int , A : int , A : int = 2) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Convad(A , A , kernel_size=1 , stride=A , bias=A)
_UpperCAmelCase = nn.BatchNormad(A)
def _lowerCamelCase ( self : Optional[int] , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = self.convolution(A)
_UpperCAmelCase = self.normalization(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , A : int , A : int , A : int = 1 , A : str = "relu") -> List[str]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = (
ResNetShortCut(A , A , stride=A) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
ResNetConvLayer(A , A , stride=A) , ResNetConvLayer(A , A , activation=A) , )
_UpperCAmelCase = ACTaFN[activation]
def _lowerCamelCase ( self : Tuple , A : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(A)
_UpperCAmelCase = self.shortcut(A)
hidden_state += residual
_UpperCAmelCase = self.activation(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : str , A : int , A : int , A : int = 1 , A : str = "relu" , A : int = 4) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = out_channels // reduction
_UpperCAmelCase = (
ResNetShortCut(A , A , stride=A) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
ResNetConvLayer(A , A , kernel_size=1) , ResNetConvLayer(A , A , stride=A) , ResNetConvLayer(A , A , kernel_size=1 , activation=A) , )
_UpperCAmelCase = ACTaFN[activation]
def _lowerCamelCase ( self : List[Any] , A : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(A)
_UpperCAmelCase = self.shortcut(A)
hidden_state += residual
_UpperCAmelCase = self.activation(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , A : ResNetConfig , A : int , A : int , A : int = 2 , A : int = 2 , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_UpperCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(A , A , stride=A , activation=config.hidden_act) , *[layer(A , A , activation=config.hidden_act) for _ in range(depth - 1)] , )
def _lowerCamelCase ( self : str , A : Tensor) -> Tensor:
"""simple docstring"""
_UpperCAmelCase = input
for layer in self.layers:
_UpperCAmelCase = layer(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Any , A : ResNetConfig) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
_UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(A , config.depths[1:]):
self.stages.append(ResNetStage(A , A , A , depth=A))
def _lowerCamelCase ( self : Dict , A : Tensor , A : bool = False , A : bool = True) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
_UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
_UpperCAmelCase = stage_module(A)
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=A , hidden_states=A , )
class __lowerCAmelCase ( A ):
UpperCamelCase = ResNetConfig
UpperCamelCase = '''resnet'''
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def _lowerCamelCase ( self : Any , A : int) -> str:
"""simple docstring"""
if isinstance(A , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(A , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _lowerCamelCase ( self : List[str] , A : List[Any] , A : Union[str, Any]=False) -> str:
"""simple docstring"""
if isinstance(A , A):
_UpperCAmelCase = value
UpperCAmelCase__ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , A , )
class __lowerCAmelCase ( A ):
def __init__( self : List[Any] , A : Any) -> str:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = config
_UpperCAmelCase = ResNetEmbeddings(A)
_UpperCAmelCase = ResNetEncoder(A)
_UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self : Optional[int] , A : Tensor , A : Optional[bool] = None , A : Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.embedder(A)
_UpperCAmelCase = self.encoder(
A , output_hidden_states=A , return_dict=A)
_UpperCAmelCase = encoder_outputs[0]
_UpperCAmelCase = self.pooler(A)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A , pooler_output=A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , A , )
class __lowerCAmelCase ( A ):
def __init__( self : Any , A : Any) -> int:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = ResNetModel(A)
# classification head
_UpperCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self : Any , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.resnet(A , output_hidden_states=A , return_dict=A)
_UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase = self.classifier(A)
_UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase = 'single_label_classification'
else:
_UpperCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
_UpperCAmelCase = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze())
else:
_UpperCAmelCase = loss_fct(A , A)
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase = CrossEntropyLoss()
_UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase = BCEWithLogitsLoss()
_UpperCAmelCase = loss_fct(A , A)
if not return_dict:
_UpperCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A , logits=A , hidden_states=outputs.hidden_states)
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , A , )
class __lowerCAmelCase ( A , A ):
def __init__( self : Dict , A : List[str]) -> Any:
"""simple docstring"""
super().__init__(A)
super()._init_backbone(A)
_UpperCAmelCase = [config.embedding_size] + config.hidden_sizes
_UpperCAmelCase = ResNetEmbeddings(A)
_UpperCAmelCase = ResNetEncoder(A)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A)
@replace_return_docstrings(output_type=A , config_class=_CONFIG_FOR_DOC)
def _lowerCamelCase ( self : Optional[int] , A : Tensor , A : Optional[bool] = None , A : Optional[bool] = None) -> BackboneOutput:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = self.embedder(A)
_UpperCAmelCase = self.encoder(A , output_hidden_states=A , return_dict=A)
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_UpperCAmelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=A , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=A , )
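# --- Added illustration (names are mine, not the module's public API): the
# basic and bottleneck layers above both follow the residual pattern
# y = activation(F(x) + shortcut(x)), with an identity shortcut whenever the
# shape is unchanged. Minimal self-contained sketch:
class _TinyResidualBlock(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(channels),
        )
        self.activation = nn.ReLU()

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        hidden_state = hidden_state + residual  # the skip connection
        return self.activation(hidden_state)

assert _TinyResidualBlock(8)(torch.randn(1, 8, 16, 16)).shape == (1, 8, 16, 16)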
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
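# --- Added illustration (hypothetical key) of the regex-driven renaming that
# fix_jukebox_keys performs: capture the nested indices, flatten them, then
# substitute into the new naming scheme. Kept as comments since `re` is only
# imported inside the function above.
#
# pattern = re.compile(r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d)\.(bias|weight)")
# groups = pattern.fullmatch("encoders.0.level_blocks.1.model.2.3.weight").groups()
# block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 3 == 7
# new key -> "encoders.0.level_blocks.1.downsample_block.7.weight"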
| 639
| 1
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """simple docstring"""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
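    # Added example (the function name was assigned during repair above): each
    # cell ends up holding the cheapest cost of reaching it moving only right
    # or down, so the bottom-right cell is the answer.
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(grid) == 7  # path 1 -> 3 -> 1 -> 1 -> 1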
| 55
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE = R""".*/layers_(\d+)"""
__SCREAMING_SNAKE_CASE = key
if re.match(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = R"""(encoder|decoder)\/"""
if re.match(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = re.match(__UpperCAmelCase , __UpperCAmelCase ).groups()
if groups[0] == "encoder":
__SCREAMING_SNAKE_CASE = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , __UpperCAmelCase )
elif groups[0] == "decoder":
__SCREAMING_SNAKE_CASE = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , __UpperCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__SCREAMING_SNAKE_CASE = new_key.replace(__UpperCAmelCase , __UpperCAmelCase )
print(f"""{key} -> {new_key}""" )
__SCREAMING_SNAKE_CASE = s_dict.pop(__UpperCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # NOTE: the replacement pattern below is reconstructed; the
                # original nested f-string was lost in extraction (it appeared
                # here only as the placeholder 'nested fstring').
                new_key = key.replace("expert/", f"experts.expert_{idx}.")
                s_dict[new_key] = expert_weights[idx]
                print(f"""{key} -> {new_key}""")
            s_dict.pop(key)
return s_dict
a = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config file into a SwitchTransformersConfig
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the T5X SwitchTransformers checkpoint that should be converted. If `config_name` is not"
            " provided, a `gin_file` has to be provided."
        ),
    )
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
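A minimal smoke test of the key-renaming logic is sketched below as comments; the toy key and the expected output are illustrative only and are not part of the conversion script.
# Illustrative check of rename_keys (assumes numpy; the array stands in for a checkpoint tensor):
#   import numpy as np
#   toy = {"encoder/layers_0/attention/query/kernel": np.zeros((2, 2))}
#   print(list(rename_keys(toy)))
#   -> ['encoder/block/0/layer/0/SelfAttention/q/kernel']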
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class OpenLlamaConfig(PretrainedConfig):
    """Configuration class for the Open-Llama model."""

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key is kept for backward compatibility with older checkpoints
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
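A small, self-contained sanity check of the rope_scaling validation (illustrative values; not part of the original module):

if __name__ == "__main__":
    # A valid configuration passes silently...
    cfg = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(cfg.rope_scaling)
    # ...while an unknown scaling type raises a ValueError.
    try:
        OpenLlamaConfig(rope_scaling={"type": "exponential", "factor": 2.0})
    except ValueError as err:
        print(err)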
|
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_cubes = (n * (n + 1) // 2) ** 2  # equals the square of the sum
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
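A quick worked check of the closed forms: for n = 10 the square of the sum is (10 * 11 / 2)^2 = 3025 and the sum of the squares is 10 * 11 * 21 / 6 = 385, so the function returns 2640.
# solution(10) == 3025 - 385 == 2640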
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
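Consumers import schedulers through this package without repeating the dependency guards; a minimal usage sketch (assumes diffusers with torch installed):

if __name__ == "__main__":
    from diffusers.schedulers import DDIMScheduler

    scheduler = DDIMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    print(scheduler.timesteps[:5])  # the first few of the 50 inference timesteps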
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
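The conv_stride product gives the number of raw audio samples that collapse into one encoder frame; with the defaults that is 5 * 2**6 = 320. A quick check (illustrative only):

if __name__ == "__main__":
    config = WavLMConfig()
    print(config.inputs_to_logits_ratio)  # 320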
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowercase = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
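A quick shape check of the cosine_distance helper (random tensors stand in for CLIP embeddings; illustrative only):

if __name__ == "__main__":
    image_embeds = torch.randn(2, 8)
    text_embeds = torch.randn(5, 8)
    sim = cosine_distance(image_embeds, text_embeds)
    print(sim.shape)  # torch.Size([2, 5]); rows are L2-normalized, so values lie in [-1, 1]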
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n
    (Project Euler problem 29)."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    print("Number of terms ", solution(int(input().strip())))
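For a small sanity check: with n = 5 the powers a**b for 2 <= a, b <= 5 produce 15 distinct values (16 appears twice, as 2**4 and 4**2).
# solution(5) == 15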
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
|
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction diode at T = 300 K.
    All three concentrations must be given in the same units (e.g. cm^-3)."""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
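A representative evaluation for a symmetric silicon junction at 300 K (numbers are illustrative; n_i ≈ 1.5e10 cm^-3 for Si):
# builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# = (kT / q) * ln(1e34 / 2.25e20) ≈ 0.0259 V * 31.4 ≈ 0.81 V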
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Count the fractions in the first ``n`` expansions of sqrt(2)'s continued
    fraction whose numerator has more digits than the denominator
    (Project Euler problem 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f"""{solution() = }""")
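The first expansion whose numerator gains a digit is the 8th (1393/985), so solution(8) == 1; the default n = 1000 yields the Project Euler answer, 153.
# solution(8) == 1; solution() == 153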
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
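Because fire.Fire exposes minify directly on the command line, a hypothetical invocation looks like this (the paths and line count are placeholders):
# python minify.py data/full data/sample 10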
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None
class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Smoke test exercising the CircularLinkedList operations."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
|
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium: the net moment is ~0."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
SCREAMING_SNAKE_CASE__ : List[Any] = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
SCREAMING_SNAKE_CASE__ : List[Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
SCREAMING_SNAKE_CASE__ : int = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
SCREAMING_SNAKE_CASE__ : Tuple = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
SCREAMING_SNAKE_CASE__ : List[str] = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
SCREAMING_SNAKE_CASE__ : str = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_lowerCamelCase : Union[str, Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Compute absolute learning rate
_lowerCamelCase : Optional[Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
_lowerCamelCase : Optional[Any] = Trainer(
model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_lowerCamelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_lowerCamelCase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCamelCase : Union[str, Any] = last_checkpoint
_lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCamelCase : int = trainer.evaluate()
trainer.log_metrics("""eval""" , __snake_case )
trainer.save_metrics("""eval""" , __snake_case )
# Write model card and (optionally) push to hub
_lowerCamelCase : Optional[Any] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def _snake_case ( __snake_case : Dict ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
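# Worked example of the linear learning-rate scaling used above (a sketch; the
# batch sizes are hypothetical, not taken from any actual run): with a
# per-device batch of 64, gradient accumulation of 2 and a world size of 8,
#
#     total_train_batch_size = 64 * 2 * 8        # = 1024
#     learning_rate = 1.5e-4 * 1024 / 256        # = 6e-4, i.e. lr = base_lr * batch_size / 256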
| 88
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE , ) -> List[str]:
__lowerCAmelCase : Dict = parent
__lowerCAmelCase : int = 13
__lowerCAmelCase : Any = 7
__lowerCAmelCase : Tuple = True
__lowerCAmelCase : str = True
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Union[str, Any] = True
__lowerCAmelCase : Tuple = 99
__lowerCAmelCase : Union[str, Any] = 32
__lowerCAmelCase : str = 2
__lowerCAmelCase : Union[str, Any] = 4
__lowerCAmelCase : Union[str, Any] = 37
__lowerCAmelCase : List[str] = 'gelu'
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[str] = 0.1
        __lowerCAmelCase : int = 512
__lowerCAmelCase : Optional[int] = 16
__lowerCAmelCase : Dict = 2
        __lowerCAmelCase : Union[str, Any] = 0.02
__lowerCAmelCase : Union[str, Any] = 3
__lowerCAmelCase : List[str] = 4
__lowerCAmelCase : int = None
def snake_case ( self ) -> List[str]:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : int = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : List[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase : Tuple = TFDistilBertModel(config=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
__lowerCAmelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = [input_ids, input_mask]
__lowerCAmelCase : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase : List[Any] = TFDistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask}
__lowerCAmelCase : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase : int = TFDistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
__lowerCAmelCase : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Tuple = TFDistilBertForSequenceClassification(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
__lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase : Dict = self.num_choices
__lowerCAmelCase : Union[str, Any] = TFDistilBertForMultipleChoice(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
__lowerCAmelCase : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Dict = TFDistilBertForTokenClassification(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
__lowerCAmelCase : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ) -> Union[str, Any]:
__lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : int = config_and_inputs
__lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( a , a , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_snake_case = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = False
def snake_case ( self ) -> Optional[Any]:
__lowerCAmelCase : Optional[Any] = TFDistilBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , dim=37 )
def snake_case ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case ( self ) -> Optional[Any]:
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self ) -> Union[str, Any]:
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE )
def snake_case ( self ) -> Optional[int]:
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE )
def snake_case ( self ) -> Union[str, Any]:
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def snake_case ( self ) -> List[str]:
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE )
def snake_case ( self ) -> str:
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self ) -> int:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__lowerCAmelCase : List[str] = TFDistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case ( self ) -> Any:
__lowerCAmelCase : Dict = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
__lowerCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(SCREAMING_SNAKE_CASE )[0]
        __lowerCAmelCase : Union[str, Any] = [1, 6, 768]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
| 123
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( a , unittest.TestCase ):
'''simple docstring'''
_snake_case = OpenAIGPTTokenizer
_snake_case = OpenAIGPTTokenizerFast
_snake_case = True
_snake_case = False
def snake_case ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__lowerCAmelCase : Tuple = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Any:
return "lower newer", "lower newer"
def snake_case ( self ) -> List[str]:
__lowerCAmelCase : List[str] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__lowerCAmelCase : List[str] = 'lower'
__lowerCAmelCase : Union[str, Any] = ['low', 'er</w>']
__lowerCAmelCase : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = tokens + ['<unk>']
__lowerCAmelCase : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self , SCREAMING_SNAKE_CASE=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
__lowerCAmelCase : Optional[Any] = 'This is a simple input'
__lowerCAmelCase : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
__lowerCAmelCase : int = ('This is a simple input', 'This is a pair')
__lowerCAmelCase : Optional[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' , )
def snake_case ( self ) -> int:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCamelCase__ ( a ):
'''simple docstring'''
pass
| 123
| 1
|
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the path from `node` back to the start via parent pointers."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
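# Usage sketch (coordinates are (y, x) pairs on the grid defined above):
#
#     path = BidirectionalBreadthFirstSearch((0, 0), (6, 6)).search()
#     # returns a list of (y, x) positions from start to goal
#
# Expanding from both ends visits roughly O(b^(d/2)) nodes per side instead of
# O(b^d) for plain BFS, which is where the speedup measured above comes from.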
| 25
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 25
| 1
|
def move_tower(height, from_pole, to_pole, with_pole):
    """
    >>> move_tower(1, 'A', 'B', 'C')
    moving disk from A to B
    >>> move_tower(2, 'A', 'B', 'C')
    moving disk from A to C
    moving disk from A to B
    moving disk from C to B
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print('moving disk from', fp, 'to', tp)


def main():
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
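# Worked example (a sketch): for height 2 the recursion emits three moves --
# the smaller disk parks on the spare pole, the larger disk moves to the
# target, then the smaller disk follows:
#
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B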
| 713
|
def counting_sort(collection):
    """
    Pure implementation of counting sort algorithm in Python.

    >>> counting_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
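# Worked example (a sketch): for collection = [4, 1, 3, 1], coll_min = 1 and the
# counts over the values 1..4 are [2, 0, 1, 1]; the prefix-sum pass turns this
# into [2, 2, 3, 4], so the last 1 (index 3) is placed at ordered[2 - 1] and the
# stable result is [1, 1, 3, 4].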
| 113
| 0
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=0.0 , lowercase_ = None , lowercase_ = "geglu" , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = "layer_norm" , lowercase_ = False , ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = only_cross_attention
lowerCAmelCase_ = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
lowerCAmelCase_ = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase_ = AdaLayerNorm(lowercase_ , lowercase_ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase_ = AdaLayerNormZero(lowercase_ , lowercase_ )
else:
lowerCAmelCase_ = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
lowerCAmelCase_ = Attention(
query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , dropout=lowercase_ , bias=lowercase_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=lowercase_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase_ = (
AdaLayerNorm(lowercase_ , lowercase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
)
lowerCAmelCase_ = Attention(
query_dim=lowercase_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=lowercase_ , dim_head=lowercase_ , dropout=lowercase_ , bias=lowercase_ , upcast_attention=lowercase_ , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = None
# 3. Feed-forward
lowerCAmelCase_ = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
lowerCAmelCase_ = FeedForward(lowercase_ , dropout=lowercase_ , activation_fn=lowercase_ , final_dropout=lowercase_ )
# let chunk size default to None
lowerCAmelCase_ = None
lowerCAmelCase_ = 0
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = chunk_size
lowerCAmelCase_ = dim
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Optional[Any]:
'''simple docstring'''
if self.use_ada_layer_norm:
lowerCAmelCase_ = self.norma(lowercase_ , lowercase_ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.norma(
lowercase_ , lowercase_ , lowercase_ , hidden_dtype=hidden_states.dtype )
else:
lowerCAmelCase_ = self.norma(lowercase_ )
lowerCAmelCase_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase_ = self.attna(
lowercase_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=lowercase_ , **lowercase_ , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase_ = gate_msa.unsqueeze(1 ) * attn_output
lowerCAmelCase_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase_ = (
self.norma(lowercase_ , lowercase_ ) if self.use_ada_layer_norm else self.norma(lowercase_ )
)
lowerCAmelCase_ = self.attna(
lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase_ = self.norma(lowercase_ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
lowerCAmelCase_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase_ = torch.cat(
[self.ff(lowercase_ ) for hid_slice in norm_hidden_states.chunk(lowercase_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCAmelCase_ = self.ff(lowercase_ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase_ = gate_mlp.unsqueeze(1 ) * ff_output
lowerCAmelCase_ = ff_output + hidden_states
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ = None , lowercase_ = 4 , lowercase_ = 0.0 , lowercase_ = "geglu" , lowercase_ = False , ) -> Tuple:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = int(dim * mult )
lowerCAmelCase_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase_ = GELU(lowercase_ , lowercase_ )
if activation_fn == "gelu-approximate":
lowerCAmelCase_ = GELU(lowercase_ , lowercase_ , approximate='tanh' )
elif activation_fn == "geglu":
lowerCAmelCase_ = GEGLU(lowercase_ , lowercase_ )
elif activation_fn == "geglu-approximate":
lowerCAmelCase_ = ApproximateGELU(lowercase_ , lowercase_ )
lowerCAmelCase_ = nn.ModuleList([] )
# project in
self.net.append(lowercase_ )
# project dropout
self.net.append(nn.Dropout(lowercase_ ) )
# project out
self.net.append(nn.Linear(lowercase_ , lowercase_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(lowercase_ ) )
def _lowercase ( self , lowercase_ ) -> Tuple:
'''simple docstring'''
for module in self.net:
lowerCAmelCase_ = module(lowercase_ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ = "none" ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ )
lowerCAmelCase_ = approximate
def _lowercase ( self , lowercase_ ) -> Optional[int]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowercase_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _lowercase ( self , lowercase_ ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = self.proj(lowercase_ )
lowerCAmelCase_ = self.gelu(lowercase_ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = nn.Linear(lowercase_ , dim_out * 2 )
def _lowercase ( self , lowercase_ ) -> List[Any]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowercase_ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def _lowercase ( self , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.proj(lowercase_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(lowercase_ )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ )
def _lowercase ( self , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.proj(lowercase_ )
        return x * torch.sigmoid(1.702 * x )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = nn.Embedding(lowercase_ , lowercase_ )
lowerCAmelCase_ = nn.SiLU()
lowerCAmelCase_ = nn.Linear(lowercase_ , embedding_dim * 2 )
lowerCAmelCase_ = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.linear(self.silu(self.emb(lowercase_ ) ) )
lowerCAmelCase_ , lowerCAmelCase_ = torch.chunk(lowercase_ , 2 )
lowerCAmelCase_ = self.norm(lowercase_ ) * (1 + scale) + shift
return x
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = CombinedTimestepLabelEmbeddings(lowercase_ , lowercase_ )
lowerCAmelCase_ = nn.SiLU()
lowerCAmelCase_ = nn.Linear(lowercase_ , 6 * embedding_dim , bias=lowercase_ )
lowerCAmelCase_ = nn.LayerNorm(lowercase_ , elementwise_affine=lowercase_ , eps=1e-6 )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.linear(self.silu(self.emb(lowercase_ , lowercase_ , hidden_dtype=lowercase_ ) ) )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = emb.chunk(6 , dim=1 )
lowerCAmelCase_ = self.norm(lowercase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = 1e-5 ) -> List[str]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = num_groups
lowerCAmelCase_ = eps
if act_fn is None:
lowerCAmelCase_ = None
else:
lowerCAmelCase_ = get_activation(lowercase_ )
lowerCAmelCase_ = nn.Linear(lowercase_ , out_dim * 2 )
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
if self.act:
lowerCAmelCase_ = self.act(lowercase_ )
lowerCAmelCase_ = self.linear(lowercase_ )
lowerCAmelCase_ = emb[:, :, None, None]
lowerCAmelCase_ , lowerCAmelCase_ = emb.chunk(2 , dim=1 )
lowerCAmelCase_ = F.group_norm(lowercase_ , self.num_groups , eps=self.eps )
lowerCAmelCase_ = x * (1 + scale) + shift
return x
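# Minimal sketch of the chunked feed-forward trick used in the transformer
# block above (the shapes and the stand-in `ff` below are hypothetical, not
# part of this module's API). Because the feed-forward acts on each position
# independently, chunking along the sequence dimension lowers peak memory
# while producing the same output:
#
#     import torch
#     ff = lambda t: t * 2.0                       # stand-in for self.ff
#     hidden = torch.randn(2, 8, 16)               # (batch, seq, dim)
#     num_chunks = hidden.shape[1] // 4            # chunk size of 4 along dim=1
#     out = torch.cat([ff(c) for c in hidden.chunk(num_chunks, dim=1)], dim=1)
#     assert torch.equal(out, ff(hidden))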
| 318
|
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 318
| 1
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowercase:
"""simple docstring"""
def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : List[Any]=6 , _lowerCAmelCase : Optional[Any]=17 , _lowerCAmelCase : Optional[int]=23 , _lowerCAmelCase : List[str]=11 , _lowerCAmelCase : List[str]=True , ) -> int:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = act_dim
_lowerCAmelCase = state_dim
_lowerCAmelCase = hidden_size
_lowerCAmelCase = max_length
_lowerCAmelCase = is_training
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
_lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
_lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
_lowerCAmelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
_lowerCAmelCase = random_attention_mask((self.batch_size, self.seq_length) )
_lowerCAmelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , ) -> List[str]:
_lowerCAmelCase = DecisionTransformerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class __lowercase( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (DecisionTransformerModel,) if is_torch_available() else ()
UpperCamelCase_ = ()
UpperCamelCase_ = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
UpperCamelCase_ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
_lowerCAmelCase = DecisionTransformerModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = DecisionTransformerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(_lowerCAmelCase )] , _lowerCAmelCase )
@require_torch
class __lowercase( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Tuple:
_lowerCAmelCase = 2 # number of steps of autoregressive prediction we will perform
_lowerCAmelCase = 10 # defined by the RL environment, may be normalized
_lowerCAmelCase = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
_lowerCAmelCase = model.to(_lowerCAmelCase )
_lowerCAmelCase = model.config
torch.manual_seed(0 )
        _lowerCAmelCase = torch.randn(1 , 1 , config.state_dim ).to(device=_lowerCAmelCase , dtype=torch.float32 ) # env.reset()
_lowerCAmelCase = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=_lowerCAmelCase )
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase , device=_lowerCAmelCase , dtype=torch.float32 ).reshape(1 , 1 , 1 )
_lowerCAmelCase = state
        _lowerCAmelCase = torch.zeros(1 , 0 , config.act_dim , device=_lowerCAmelCase , dtype=torch.float32 )
        _lowerCAmelCase = torch.zeros(1 , 0 , device=_lowerCAmelCase , dtype=torch.float32 )
_lowerCAmelCase = torch.tensor(0 , device=_lowerCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(_lowerCAmelCase ):
_lowerCAmelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = model(
states=_lowerCAmelCase , actions=_lowerCAmelCase , rewards=_lowerCAmelCase , returns_to_go=_lowerCAmelCase , timesteps=_lowerCAmelCase , attention_mask=_lowerCAmelCase , return_dict=_lowerCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=_lowerCAmelCase , dtype=torch.float32 ),
1.0,
False,
{},
)
_lowerCAmelCase = action_pred[0, -1]
_lowerCAmelCase = torch.cat([states, state] , dim=1 )
_lowerCAmelCase = returns_to_go[0, -1] - reward
_lowerCAmelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_lowerCAmelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=_lowerCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
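# Bookkeeping sketch for the rollout above (numbers are illustrative): decision
# transformers condition on returns-to-go, so after every environment step the
# remaining target return shrinks by the reward just received:
#
#     target_return = 10.0                          # initial conditioning value
#     reward = 1.0                                  # from env.step(action)
#     next_return_to_go = target_return - reward    # 9.0, appended to returns_to_go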
| 713
|
def interpolation_search(sorted_collection, item):
    """Searches an ascending sorted collection and returns the index of ``item`` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; the caller passes the initial ``left`` and ``right`` bounds."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raises ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(F"{target} found at positions: {result}")
        else:
            print('Not found')
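# Worked example of the probe formula (a sketch): for
# sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93] and item = 67, the first
# probe is point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4; since
# sorted_collection[4] = 50 < 67, the search continues with left = 5.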
| 585
| 0
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[str] = [0] * len(SCREAMING_SNAKE_CASE )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
# use last results for better performance - dynamic programming
lowerCAmelCase : Dict = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowerCAmelCase : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowerCAmelCase : List[str] = j
return prefix_result
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return max(prefix_function(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
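# Worked example (a sketch): prefix_function("aabaaab") returns
# [0, 1, 0, 1, 2, 2, 3]; the final 3 means the 3-character prefix "aab" is also
# a suffix of the whole string, so longest_prefix("aabaaab") == 3.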
| 645
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 645
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = 'umt5'
snake_case_ = ['past_key_values']
def __init__( self : List[str] , snake_case : List[Any]=25_0112 , snake_case : Optional[Any]=512 , snake_case : List[str]=64 , snake_case : str=1024 , snake_case : str=8 , snake_case : int=None , snake_case : str=6 , snake_case : List[str]=32 , snake_case : int=128 , snake_case : Any=0.1 , snake_case : Optional[Any]=1e-6 , snake_case : Dict=1.0 , snake_case : Any="gated-gelu" , snake_case : Tuple=True , snake_case : Union[str, Any]=True , snake_case : Any="T5Tokenizer" , snake_case : Optional[int]=True , snake_case : Optional[int]=0 , snake_case : str=1 , snake_case : Any=0 , **snake_case : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
is_encoder_decoder=snake_case , tokenizer_class=snake_case , tie_word_embeddings=snake_case , pad_token_id=snake_case , eos_token_id=snake_case , decoder_start_token_id=snake_case , **snake_case , )
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : Dict = d_model
__magic_name__ : Optional[int] = d_kv
__magic_name__ : Optional[Any] = d_ff
__magic_name__ : Dict = num_layers
__magic_name__ : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__magic_name__ : List[str] = num_heads
__magic_name__ : Any = relative_attention_num_buckets
__magic_name__ : List[str] = relative_attention_max_distance
__magic_name__ : str = dropout_rate
__magic_name__ : Optional[Any] = layer_norm_epsilon
__magic_name__ : Dict = initializer_factor
__magic_name__ : List[Any] = feed_forward_proj
__magic_name__ : Optional[Any] = use_cache
__magic_name__ : List[Any] = self.feed_forward_proj.split('''-''' )
__magic_name__ : List[Any] = act_info[-1]
__magic_name__ : List[str] = act_info[0] == '''gated'''
if len(snake_case ) > 1 and act_info[0] != "gated" or len(snake_case ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
__magic_name__ : Optional[Any] = '''gelu_new'''
@property
def _UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
return self.d_model
@property
def _UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
return self.num_heads
@property
def _UpperCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.num_layers
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__magic_name__ : Optional[int] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
__magic_name__ : Tuple = '''past_encoder_sequence + sequence'''
__magic_name__ : List[str] = {0: '''batch'''}
__magic_name__ : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__magic_name__ : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
__magic_name__ : str = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
return 13
@property
def _UpperCAmelCase ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 5e-4
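# Sketch of the `feed_forward_proj` parsing in `__init__` above (values are
# illustrative):
#
#     act_info = "gated-gelu".split("-")      # ["gated", "gelu"]
#     dense_act_fn = act_info[-1]             # "gelu", remapped to "gelu_new" above
#     is_gated_act = act_info[0] == "gated"   # True -> gated (GEGLU-style) FFN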
| 147
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Dict = 10
def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Optional[int] = [1, 2, 3, 4]
__magic_name__ : Optional[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(snake_case , self.block_size , 0 ) , snake_case )
def _UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__magic_name__ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case , self.block_size , 0 ) , snake_case )
def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__magic_name__ : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case , self.block_size , 0 ) , snake_case )
def _UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
__magic_name__ : List[str] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__magic_name__ , __magic_name__ : Optional[Any] = process_story(snake_case )
self.assertEqual(snake_case , [] )
def _UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = ''''''
__magic_name__ , __magic_name__ : Optional[int] = process_story(snake_case )
self.assertEqual(snake_case , [] )
self.assertEqual(snake_case , [] )
def _UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
__magic_name__ : Optional[Any] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__magic_name__ , __magic_name__ : Union[str, Any] = process_story(snake_case )
__magic_name__ : int = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(snake_case , snake_case )
__magic_name__ : Tuple = ['''It was the best of times.''']
self.assertEqual(snake_case , snake_case )
def _UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : Optional[int] = torch.tensor([1, 2, 3, 4] )
__magic_name__ : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(snake_case , 0 ).numpy() , expected.numpy() )
def _UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
__magic_name__ : Any = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__magic_name__ : Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case , 23 ).numpy() , expected.numpy() )
def _UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__magic_name__ : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case , 1 ).numpy() , expected.numpy() )
def _UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : List[str] = 101
__magic_name__ : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__magic_name__ : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__magic_name__ : List[str] = compute_token_type_ids(snake_case , snake_case )
np.testing.assert_array_equal(snake_case , snake_case )
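# A reference implementation of the behavior the three truncate_or_pad tests
# above exercise (a sketch, assuming the imported helper right-pads short
# sequences with the pad id and truncates long ones to block_size):
def truncate_or_pad_reference(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad_reference([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad_reference(list(range(1, 14)), 10, 0) == list(range(1, 11))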
| 147
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def A__ ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
        UpperCamelCase = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.dummy_uncond_unet
UpperCamelCase = ScoreSdeVeScheduler()
UpperCamelCase = ScoreSdeVePipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
sde_ve.to(_SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_SCREAMING_SNAKE_CASE ).images
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )[
0
]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class a_ ( unittest.TestCase ):
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = """google/ncsnpp-church-256"""
        UpperCamelCase = UNet2DModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ScoreSdeVeScheduler.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ScoreSdeVePipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
sde_ve.to(_SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_SCREAMING_SNAKE_CASE ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
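# Minimal standalone usage of the pipeline exercised above (a sketch; it
# assumes the checkpoint repo used in the slow test ships a full pipeline
# config so that from_pretrained can assemble both unet and scheduler):
#
#     import torch
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0]
#     image.save("sde_ve_sample.png")  # the default output_type is PIL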
| 301
|
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to ``number``."""
    if number != int(number):
        raise ValueError("""the value of input must be a natural number""")
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""")
    if number == 0:
        # the original convention treats 0 as one square (0 = 0**2)
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
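    # Quick sanity checks of the DP above: 12 = 4 + 4 + 4 needs three squares,
    # while 13 = 4 + 9 needs only two.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(13) == 2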
| 301
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_deberta_fast'''] = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deberta'''] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deberta'''] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
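# What the lazy pattern above buys you (a sketch): importing the package is
# cheap, and the heavy torch/tf submodules are loaded only on first attribute
# access, e.g.:
#
#     from transformers.models.deberta import DebertaConfig  # imports only configuration_deberta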
| 721
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates into the kernel frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn the image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
    waitKey(0)
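    # Quick shape check for the kernel builder above: an even ksize is bumped
    # to the next odd value, so a 10x10 request yields an 11x11 kernel.
    assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)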
| 602
| 0
|
'''simple docstring'''
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, """handle_key""", [])
        handle += [key]
        setattr(func, """handle_key""", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, """handle_key""", [])
        handle += keys
        setattr(func, """handle_key""", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, """key_handler"""):
            setattr(new_cls, """key_handler""", {})
        setattr(new_cls, """handle_input""", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, """handle_key""", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and call the handler registered for the next keypress, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild the class through the KeyHandler metaclass."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
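# A minimal usage sketch of the machinery above. The menu class is an
# illustrative assumption, and it assumes KEYMAP defines an "up" entry:
#
#     @register
#     class DemoMenu:
#         @mark(KEYMAP["up"])
#         def on_up(cls):
#             return "moved up"
#
# After registration, handle_input dispatches the next keypress to the handler
# marked with that key (or returns None for unhandled keys).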
| 566
|
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive search that inspects one element from each end per call."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
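    # Example runs of the two-pointer recursion above: indices move inward
    # from both ends, so each call eliminates one position per side.
    assert search([5, 1, 9, 4], 4) == 3
    assert search([5, 1, 9, 4], 7) == -1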
| 566
| 1
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return every prime below ``limit`` using an odd-only sieve."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` expressible as the longest sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
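# For reference: with the default ceiling of 1_000_000 the longest qualifying
# run has 543 consecutive primes, and solution() evaluates to 997651
# (Project Euler problem 50).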
| 580
|
from math import pi
def snake_case (__lowercase , __lowercase ) -> float:
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
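# Worked check: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ~ 15.708.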
| 580
| 1
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _SCREAMING_SNAKE_CASE ( tf.keras.optimizers.schedules.LearningRateSchedule ):
"""simple docstring"""
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or """WarmUp""") as name:
            # Implements polynomial warmup: if global_step < warmup_steps, the
            # learning rate is `global_step / num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
def UpperCAmelCase__( self ) -> List[str]:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(
    init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9,
    adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None,
    weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon,
            clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2,
            epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class _SCREAMING_SNAKE_CASE ( lowercase_ ):
"""simple docstring"""
    def __init__(
        self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False,
        weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None,
        name="AdamWeightDecay", **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def UpperCAmelCase__( cls , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : Optional[int] = {"""WarmUp""": WarmUp}
return super(lowerCamelCase__ , cls ).from_config(lowerCamelCase__ , custom_objects=lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
super(lowerCamelCase__ , self )._prepare_local(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : str = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
lowercase__ : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ) -> Optional[int]:
lowercase__ , lowercase__ : List[str] = list(zip(*lowerCamelCase__ ) )
return super(lowerCamelCase__ , self ).apply_gradients(zip(lowerCamelCase__ , lowerCamelCase__ ) , name=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowercase__ : int = apply_state or {}
lowercase__ : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowercase__ : Dict = self._fallback_apply_state(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[Any]:
lowercase__ , lowercase__ : int = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ )
lowercase__ : Tuple = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__ , self )._resource_apply_dense(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ )
lowercase__ : Dict = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__ , self )._resource_apply_sparse(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : int = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None:
return False
return True
class _SCREAMING_SNAKE_CASE ( lowercase_ ):
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = []
lowercase__ : int = None
@property
def UpperCAmelCase__( self ) -> Dict:
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
return self._accum_steps.value()
@property
def UpperCAmelCase__( self ) -> List[Any]:
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowerCamelCase__ ) -> str:
if not self._gradients:
lowercase__ : Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase__ ) , trainable=lowerCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase__ ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}''' )
for accum_gradient, gradient in zip(self._gradients , lowerCamelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase__ )
self._accum_steps.assign_add(1 )
def UpperCAmelCase__( self ) -> Optional[Any]:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
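# A minimal usage sketch of the create_optimizer helper above (the step counts
# and rates are illustrative assumptions):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=1_000,
#         weight_decay_rate=0.01,  # > 0 selects AdamWeightDecay over plain Adam
#     )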
| 200
|
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below ``limit``."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'{solution() = }')
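# For reference: with the default limit of 1000 this evaluates to 233168.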
| 685
| 0
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is the difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
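    # Quick check of the in-place sort above; runtime is O(n + value range).
    data = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(data)
    assert data == [2, 3, 4, 6, 7, 8, 8]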
| 707
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_fast'''] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot'''] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot'''] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot'''] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 647
| 0
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=1000 , _SCREAMING_SNAKE_CASE=[3, 3, 6, 4] , _SCREAMING_SNAKE_CASE=[48, 56, 112, 220] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = num_labels
_UpperCAmelCase = image_size
_UpperCAmelCase = layer_depths
_UpperCAmelCase = embed_dims
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_SCREAMING_SNAKE_CASE , layer_scale_init_value=1e-5 , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = SwiftFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_UpperCAmelCase = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = self.prepare_config_and_inputs()
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : int = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_a : List[str] = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_a : str = False
_a : Union[str, Any] = False
_a : Optional[Any] = False
_a : Dict = False
_a : int = False
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = SwiftFormerModelTester(self )
_UpperCAmelCase = ConfigTester(
self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SwiftFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = 8
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
def _config_zero_init(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = copy.deepcopy(_SCREAMING_SNAKE_CASE )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1e-1_0 )
if isinstance(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = _config_zero_init(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return configs_no_init
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
pass
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 618
|
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_0_0_0)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
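# Why the gate sequence works: the two Toffoli gates accumulate the majority
# of the three inputs (the carry-out) on qubit 3, while the CNOT chain leaves
# their parity (the sum bit) on qubit 2; the final cx(0, 1) restores qubit 1.
# For inputs 1, 1, 1 the dominant measured state is therefore '11'.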
| 618
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip_2'''] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
A : Optional[int] =logging.get_logger('transformers-cli/training' )
A : Dict ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =args.output
A : List[str] =args.column_label
A : int =args.column_text
A : Union[str, Any] =args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
A : Optional[Any] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
A : Tuple =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Dict =None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
A : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Optional[Any] =args.validation_split
A : str =args.train_batch_size
A : Any =args.valid_batch_size
A : Dict =args.learning_rate
A : List[str] =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 661
| 0
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
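# For reference: the sum of the even Fibonacci terms not exceeding four
# million is 4613732.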
| 49
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(maximum_absolute_difference))
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ ( lowerCAmelCase__ ):
def __init__( self: List[str]):
'''simple docstring'''
        self.events = []
def _lowercase ( self: Dict, _lowercase: Any, _lowercase: Tuple, _lowercase: Optional[Any], **_lowercase: Any):
'''simple docstring'''
self.events.append("""on_init_end""")
def _lowercase ( self: Tuple, _lowercase: Optional[int], _lowercase: int, _lowercase: Tuple, **_lowercase: List[Any]):
'''simple docstring'''
self.events.append("""on_train_begin""")
def _lowercase ( self: Dict, _lowercase: Tuple, _lowercase: Optional[Any], _lowercase: List[str], **_lowercase: str):
'''simple docstring'''
self.events.append("""on_train_end""")
def _lowercase ( self: int, _lowercase: Union[str, Any], _lowercase: Any, _lowercase: str, **_lowercase: List[str]):
'''simple docstring'''
self.events.append("""on_epoch_begin""")
def _lowercase ( self: List[Any], _lowercase: str, _lowercase: Dict, _lowercase: Optional[Any], **_lowercase: int):
'''simple docstring'''
self.events.append("""on_epoch_end""")
def _lowercase ( self: Optional[Any], _lowercase: List[str], _lowercase: List[Any], _lowercase: Union[str, Any], **_lowercase: Union[str, Any]):
'''simple docstring'''
self.events.append("""on_step_begin""")
def _lowercase ( self: Dict, _lowercase: Dict, _lowercase: Tuple, _lowercase: Any, **_lowercase: str):
'''simple docstring'''
self.events.append("""on_step_end""")
def _lowercase ( self: Union[str, Any], _lowercase: str, _lowercase: List[Any], _lowercase: List[str], **_lowercase: List[str]):
'''simple docstring'''
self.events.append("""on_evaluate""")
def _lowercase ( self: Optional[int], _lowercase: List[Any], _lowercase: List[Any], _lowercase: List[Any], **_lowercase: str):
'''simple docstring'''
self.events.append("""on_predict""")
def _lowercase ( self: str, _lowercase: List[Any], _lowercase: int, _lowercase: Dict, **_lowercase: int):
'''simple docstring'''
self.events.append("""on_save""")
def _lowercase ( self: Any, _lowercase: int, _lowercase: List[Any], _lowercase: List[Any], **_lowercase: List[str]):
'''simple docstring'''
self.events.append("""on_log""")
def _lowercase ( self: Optional[Any], _lowercase: Optional[int], _lowercase: int, _lowercase: int, **_lowercase: Union[str, Any]):
'''simple docstring'''
self.events.append("""on_prediction_step""")
@require_torch
class lowercase_ ( unittest.TestCase ):
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
        self.output_dir = tempfile.mkdtemp()
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def _lowercase ( self: int, _lowercase: List[Any]=0, _lowercase: Optional[int]=0, _lowercase: Optional[Any]=64, _lowercase: Tuple=64, _lowercase: int=None, _lowercase: int=False, **_lowercase: Dict):
'''simple docstring'''
__lowerCAmelCase = RegressionDataset(length=_lowercase)
__lowerCAmelCase = RegressionDataset(length=_lowercase)
__lowerCAmelCase = RegressionModelConfig(a=_lowercase, b=_lowercase)
__lowerCAmelCase = RegressionPreTrainedModel(_lowercase)
__lowerCAmelCase = TrainingArguments(self.output_dir, disable_tqdm=_lowercase, report_to=[], **_lowercase)
return Trainer(
_lowercase, _lowercase, train_dataset=_lowercase, eval_dataset=_lowercase, callbacks=_lowercase, )
def _lowercase ( self: Any, _lowercase: str, _lowercase: List[str]):
'''simple docstring'''
self.assertEqual(len(_lowercase), len(_lowercase))
# Order doesn't matter
__lowerCAmelCase = sorted(_lowercase, key=lambda _lowercase: cb.__name__ if isinstance(_lowercase, _lowercase) else cb.__class__.__name__)
__lowerCAmelCase = sorted(_lowercase, key=lambda _lowercase: cb.__name__ if isinstance(_lowercase, _lowercase) else cb.__class__.__name__)
for cba, cba in zip(_lowercase, _lowercase):
if isinstance(_lowercase, _lowercase) and isinstance(_lowercase, _lowercase):
self.assertEqual(_lowercase, _lowercase)
elif isinstance(_lowercase, _lowercase) and not isinstance(_lowercase, _lowercase):
self.assertEqual(_lowercase, cba.__class__)
elif not isinstance(_lowercase, _lowercase) and isinstance(_lowercase, _lowercase):
self.assertEqual(cba.__class__, _lowercase)
else:
self.assertEqual(_lowercase, _lowercase)
def _lowercase ( self: Tuple, _lowercase: Union[str, Any]):
'''simple docstring'''
__lowerCAmelCase = ["""on_init_end""", """on_train_begin"""]
__lowerCAmelCase = 0
__lowerCAmelCase = len(trainer.get_eval_dataloader())
__lowerCAmelCase = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("""on_epoch_begin""")
for _ in range(_lowercase):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowercase ( self: Union[str, Any]):
'''simple docstring'''
__lowerCAmelCase = self.get_trainer()
__lowerCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
# Callbacks passed at init are added to the default callbacks
__lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(_lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCAmelCase = self.get_trainer(disable_tqdm=_lowercase)
__lowerCAmelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
def _lowercase ( self: Tuple):
'''simple docstring'''
__lowerCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCAmelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_lowercase)
expected_callbacks.remove(_lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
__lowerCAmelCase = self.get_trainer()
__lowerCAmelCase = trainer.pop_callback(_lowercase)
self.assertEqual(cb.__class__, _lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
trainer.add_callback(_lowercase)
expected_callbacks.insert(0, _lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
# We can also add, pop, or remove by instance
__lowerCAmelCase = self.get_trainer()
__lowerCAmelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_lowercase)
expected_callbacks.remove(_lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
__lowerCAmelCase = self.get_trainer()
__lowerCAmelCase = trainer.callback_handler.callbacks[0]
__lowerCAmelCase = trainer.pop_callback(_lowercase)
self.assertEqual(_lowercase, _lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
trainer.add_callback(_lowercase)
expected_callbacks.insert(0, _lowercase)
self.check_callbacks_equality(trainer.callback_handler.callbacks, _lowercase)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__A : int = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["BeitFeatureExtractor"]
__A : List[str] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
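
# Illustrative usage sketch (assumes transformers is installed with torch); the
# lazy module above only imports modeling_beit on first attribute access:
#
#     from transformers import BeitConfig, BeitModel
#     model = BeitModel(BeitConfig())  # triggers the real import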
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
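
# Illustrative note: the label files are assumed to be YOLO-style, one
# "class x_center y_center width height" line per object in normalized
# coordinates; get_dataset converts each to corner form, e.g. (made-up values)
# "0 0.5 0.5 0.2 0.2" -> [0, 0.4, 0.4, 0.6, 0.6].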
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
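
# Illustrative example (made-up graph): for the weighted triangle
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
# prisms_algorithm(adjacency_list) returns the MST edges [(0, 1), (1, 2)].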
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase = logging.getLogger()
def A_ ( ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('-f' )
SCREAMING_SNAKE_CASE = parser.parse_args()
return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)
        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)
        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
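
# Illustrative example: with the prefixes above, a checkpoint key such as
# "bert.bert.encoder.layer.0.attention.self.query.weight" is rewritten to
# "visual_bert.encoder.layer.0.attention.self.query.weight".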
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
def _snake_case ( _snake_case : int ) -> bool:
'''simple docstring'''
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
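
# Illustrative example (version strings are placeholders): with torch
# installed, http_user_agent({"pipeline_class": "sd"}) returns something like
# "diffusers/<ver>; python/3.10.12; session_id/<hex>; torch/<ver>; pipeline_class/sd".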
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
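
# Illustrative example: get_full_repo_name("my-model", organization="my-org")
# -> "my-org/my-model"; with no organization, the authenticated username
# returned by `whoami` is used as the prefix instead.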
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
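
# Illustrative example (made-up path): a resolved cache file such as
# ".../models--user--repo/snapshots/<40-char-sha>/unet/config.json" yields the
# <40-char-sha> component, provided it matches REGEX_COMMIT_HASH; otherwise None.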
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
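
# Illustrative example: _add_variant("diffusion_pytorch_model.bin", "fp16")
# -> "diffusion_pytorch_model.fp16.bin"; with variant=None the name is
# returned unchanged.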
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
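
# Illustrative check: for k = 2, sqrt(4 * 2 + 1) / 2 + 1 / 2 = 2 and
# log2(2) = 1 is an integer, so check_partition_perfect(2) -> True; k = 3
# gives sqrt(13) / 2 + 1 / 2, whose log2 is not an integer, so it is False.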
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
def upper(word: str) -> str:
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
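
# Illustrative usage sketch (assumes transformers is installed with torch):
#
#     from transformers import FNetConfig, FNetModel
#     config = FNetConfig(hidden_size=512, num_hidden_layers=6)
#     model = FNetModel(config)  # randomly initialized from the config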
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput
if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
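
    # Illustrative note: the default (2, 10, 16) shape means a batch of two
    # utterances, ten time steps, and 16 vocabulary entries -- matching the
    # 16-symbol vocab written out in setUp.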
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : int = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : List[str] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase_ : List[Any] = os.listdir(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = os.listdir(_SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the Hub and the local cache are the same
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase_ : Optional[Any] = processor_wavaveca(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : List[str] = processor_auto(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
UpperCAmelCase_ : Any = self._get_dummy_logits()
UpperCAmelCase_ : int = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = processor_auto.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
    @staticmethod
    def get_from_offsets(offsets, key) -> list:
        # Pull a single field (e.g. "word", "start_offset") out of each offset dict.
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Union[str, Any] = self._get_dummy_logits()[0]
UpperCAmelCase_ : Tuple = processor.decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : int = self._get_dummy_logits()
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a__ ( self ) -> Union[str, Any]:
import torch
UpperCAmelCase_ : List[str] = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase_ : Tuple = iter(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = next(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase_ : Dict = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase_ : List[str] = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
UpperCAmelCase_ : str = processor.decode(logits[0] ,output_word_offsets=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase_ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
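        # Added note: for the base Wav2Vec2 feature encoder, inputs_to_logits_ratio is
        # 320, so with 16 kHz audio each offset step spans 320 / 16_000 = 0.02 seconds.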
UpperCAmelCase_ : Any = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,output.text )
# output times
UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''start_time''' ) )
UpperCAmelCase_ : str = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''end_time''' ) )
# fmt: off
UpperCAmelCase_ : str = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
UpperCAmelCase_ : Optional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) )
| 300
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
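        # Note (added): `to_kwargs` diffs each field against the dataclass defaults,
        # so only explicitly overridden values appear in the returned dict.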
@require_cuda
def __lowerCamelCase ( self ):
        # Check that custom GradScaler kwargs are passed through to the accelerator's scaler.
SCREAMING_SNAKE_CASE_ : Tuple =GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
SCREAMING_SNAKE_CASE_ : Any =Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
SCREAMING_SNAKE_CASE_ : List[Any] =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , __UpperCAmelCase )
@require_multi_gpu
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] =['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__SCREAMING_SNAKE_CASE = Accelerator(kwargs_handlers=[ddp_scaler])
__SCREAMING_SNAKE_CASE = torch.nn.Linear(100, 200)
__SCREAMING_SNAKE_CASE = accelerator.prepare(model)
# Check the values changed in kwargs
__SCREAMING_SNAKE_CASE = ''
__SCREAMING_SNAKE_CASE = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
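    # Hedged note (added): guarded by @require_multi_gpu above, this block runs on every
    # process when the test re-launches itself via `torchrun --nproc_per_node=<gpus> <this file>`.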
| 220
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-bert'
__SCREAMING_SNAKE_CASE = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
__SCREAMING_SNAKE_CASE = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =cached_file(__UpperCAmelCase , __UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) )
with open(os.path.join(__UpperCAmelCase , 'refs' , 'main' ) ) as f:
SCREAMING_SNAKE_CASE_ : int =f.read()
self.assertEqual(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'snapshots' , __UpperCAmelCase , __UpperCAmelCase ) )
self.assertTrue(os.path.isfile(__UpperCAmelCase ) )
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE_ : Optional[int] =cached_file(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE_ : Optional[int] =cached_file(__UpperCAmelCase , __UpperCAmelCase , revision='9b8c223' )
self.assertEqual(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'snapshots' , __UpperCAmelCase , __UpperCAmelCase ) )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(__UpperCAmelCase , 'is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ : Dict =cached_file('tiny-random-bert' , __UpperCAmelCase )
with self.assertRaisesRegex(__UpperCAmelCase , 'is not a valid git identifier' ):
SCREAMING_SNAKE_CASE_ : List[Any] =cached_file(__UpperCAmelCase , __UpperCAmelCase , revision='aaaa' )
with self.assertRaisesRegex(__UpperCAmelCase , 'does not appear to have a file named' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =cached_file(__UpperCAmelCase , 'conf' )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(__UpperCAmelCase , 'does not appear to have a file named' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =cached_file(__UpperCAmelCase , 'conf' )
with open(os.path.join(__UpperCAmelCase , 'refs' , 'main' ) ) as f:
SCREAMING_SNAKE_CASE_ : Any =f.read()
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , '.no_exist' , __UpperCAmelCase , 'conf' ) ) )
SCREAMING_SNAKE_CASE_ : str =cached_file(__UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=__UpperCAmelCase )
self.assertIsNone(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =cached_file(__UpperCAmelCase , 'conf' , local_files_only=__UpperCAmelCase , _raise_exceptions_for_missing_entries=__UpperCAmelCase )
self.assertIsNone(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =mock.Mock()
SCREAMING_SNAKE_CASE_ : List[str] =500
SCREAMING_SNAKE_CASE_ : Tuple ={}
SCREAMING_SNAKE_CASE_ : str =HTTPError
SCREAMING_SNAKE_CASE_ : Optional[int] ={}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__UpperCAmelCase ) as mock_head:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =cached_file(__UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=__UpperCAmelCase )
self.assertIsNone(__UpperCAmelCase )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self ):
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , __UpperCAmelCase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , __UpperCAmelCase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , __UpperCAmelCase ) )
def __lowerCamelCase ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__UpperCAmelCase , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , __UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__UpperCAmelCase , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , __UpperCAmelCase , revision='ahaha' )
SCREAMING_SNAKE_CASE_ : Tuple =get_file_from_repo('bert-base-cased' , __UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
SCREAMING_SNAKE_CASE_ : List[str] =json.loads(open(__UpperCAmelCase , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 768 )
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : int =Path(__UpperCAmelCase ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(__UpperCAmelCase , 'a.txt' ) , str(__UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(__UpperCAmelCase , 'b.txt' ) )
| 220
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
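# Added usage sketch (an assumption, not part of the original file): with _LazyModule,
# the torch-backed classes above are only imported on first attribute access, e.g.
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig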
| 58
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """simple docstring"""

    mode = 'sequence-classification'

    def __init__(self, hparams) -> None:
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length' , default=128 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--task' , default='' , type=str , required=True , help='The GLUE task to run' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
        return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 58
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """simple docstring"""
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : int , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self : int ) -> Optional[Any]:
return len(self.lengths )
def _a ( self : Dict ) -> List[str]:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _a ( self : List[Any] ) -> List[Any]:
__UpperCAmelCase =self.params.max_model_input_size
__UpperCAmelCase =self.lengths > max_len
logger.info(f'''Splitting {sum(__SCREAMING_SNAKE_CASE )} too long sequences.''' )
def divide_chunks(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple ):
return [l[i : i + n] for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )]
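        # Added example: divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]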
__UpperCAmelCase =[]
__UpperCAmelCase =[]
if self.params.mlm:
__UpperCAmelCase , __UpperCAmelCase =self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__UpperCAmelCase , __UpperCAmelCase =self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__UpperCAmelCase =[]
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__UpperCAmelCase =np.insert(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE )
if sub_s[-1] != sep_id:
__UpperCAmelCase =np.insert(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__SCREAMING_SNAKE_CASE )
new_tok_ids.extend(__SCREAMING_SNAKE_CASE )
new_lengths.extend([len(__SCREAMING_SNAKE_CASE ) for l in sub_seqs] )
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =len(self )
__UpperCAmelCase =self.lengths > 11
__UpperCAmelCase =self.token_ids[indices]
__UpperCAmelCase =self.lengths[indices]
__UpperCAmelCase =len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _a ( self : int ) -> Any:
if "unk_token" not in self.params.special_tok_ids:
return
else:
__UpperCAmelCase =self.params.special_tok_ids["""unk_token"""]
__UpperCAmelCase =len(self )
__UpperCAmelCase =np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__UpperCAmelCase =(unk_occs / self.lengths) < 0.5
__UpperCAmelCase =self.token_ids[indices]
__UpperCAmelCase =self.lengths[indices]
__UpperCAmelCase =len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _a ( self : Any ) -> Tuple:
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
__UpperCAmelCase =[t[0] for t in batch]
__UpperCAmelCase =[t[1] for t in batch]
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
# Max for paddings
__UpperCAmelCase =max(__SCREAMING_SNAKE_CASE )
# Pad token ids
if self.params.mlm:
__UpperCAmelCase =self.params.special_tok_ids["""pad_token"""]
else:
__UpperCAmelCase =self.params.special_tok_ids["""unk_token"""]
__UpperCAmelCase =[list(t.astype(__SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(__SCREAMING_SNAKE_CASE )) for t in token_ids]
assert len(tk_ ) == len(__SCREAMING_SNAKE_CASE )
assert all(len(__SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ )
__UpperCAmelCase =torch.tensor(tk_ ) # (bs, max_seq_len_)
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) # (bs)
return tk_t, lg_t
| 68
|
def or_gate(input_a: int, input_b: int) -> int:
    """simple docstring"""
    # OR is 1 when at least one input is 1.
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
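    # Equivalence sketch (added as an assumption, not in the original file): the
    # tuple-count trick above matches plain boolean OR on 0/1 inputs.
    for input_a, input_b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        assert or_gate(input_a, input_b) == int(bool(input_a) or bool(input_b))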
| 604
| 0
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, '''src''', '''transformers''')

DUMMY_CONSTANT = '''
{0} = None
'''

DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
'''

DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
class __lowercase ( unittest.TestCase ):
def __magic_name__ ( self : List[Any] ):
a_ = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowercase__ )
a_ = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowercase__ , '''tokenizers''' )
a_ = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowercase__ , '''tensorflow_text''' )
a_ = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowercase__ , '''sentencepiece_and_tokenizers''' )
a_ = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowercase__ , '''sentencepiece_and_tensorflow_text''' )
a_ = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowercase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__ ( self : str ):
a_ = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , lowercase__ )
self.assertIn('''tensorflow_text''' , lowercase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowercase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__ ( self : Optional[int] ):
a_ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowercase__ , '''\nCONSTANT = None\n''' )
a_ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowercase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
a_ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
a_ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowercase__ , lowercase__ )
def __magic_name__ ( self : Optional[Any] ):
a_ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
a_ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowercase__ )
| 702
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = '''config.json'''
WEIGHTS_NAME = '''diffusion_pytorch_model.bin'''
FLAX_WEIGHTS_NAME = '''diffusion_flax_model.msgpack'''
ONNX_WEIGHTS_NAME = '''model.onnx'''
SAFETENSORS_WEIGHTS_NAME = '''diffusion_pytorch_model.safetensors'''
ONNX_EXTERNAL_WEIGHTS_NAME = '''weights.pb'''
HUGGINGFACE_CO_RESOLVE_ENDPOINT = '''https://huggingface.co'''
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = '''diffusers_modules'''
HF_MODULES_CACHE = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
DEPRECATED_REVISION_ARGS = ['''fp16''', '''non-ema''']
TEXT_ENCODER_ATTN_MODULE = '''.self_attn'''
| 143
| 0
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''')
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
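# Sanity sketch with assumed figures (added; the numbers are illustrative, not from
# the source): 0.05% per day on a 10_000 principal over 30 days is 150.0 in simple
# interest; one compounding period at 5% yields 500.0.
assert abs(simple_interest(10_000, 0.0005, 30) - 150.0) < 1e-6
assert abs(compound_interest(10_000, 0.05, 1) - 500.0) < 1e-6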
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
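# Quick illustration (added sketch, not part of the original test file):
#   check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)])  -> True
#   check_same_shape([torch.zeros(2, 3), torch.ones(4, 3)])  -> False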
class a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_lowercase = StableDiffusionLatentUpscalePipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
_lowercase = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase = frozenset([] )
_lowercase = True
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Dict = (16, 16)
_UpperCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=A_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=A_ , only_cross_attention=A_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
_UpperCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
_UpperCAmelCase : Union[str, Any] = EulerDiscreteScheduler(prediction_type="sample" )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="quick_gelu" , projection_dim=512 , )
_UpperCAmelCase : Optional[Any] = CLIPTextModel(A_ )
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : List[Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _UpperCAmelCase ( self , A_ , A_=0 ):
'''simple docstring'''
if str(A_ ).startswith("mps" ):
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(A_ )
else:
_UpperCAmelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ )
_UpperCAmelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "cpu"
_UpperCAmelCase : List[Any] = self.get_dummy_components()
_UpperCAmelCase : Any = self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = self.get_dummy_inputs(A_ )
_UpperCAmelCase : Optional[Any] = pipe(**A_ ).images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
_UpperCAmelCase : List[str] = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
_UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A_ , 1e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : Any = self.pipeline_class(**A_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : Tuple = self.get_dummy_inputs(A_ )
_UpperCAmelCase : List[Any] = 2
_UpperCAmelCase : Union[str, Any] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline; skip them
continue
_UpperCAmelCase : List[Any] = getattr(A_ , scheduler_enum.name )
_UpperCAmelCase : int = scheduler_cls.from_config(pipe.scheduler.config )
_UpperCAmelCase : Tuple = pipe(**A_ )[0]
outputs.append(A_ )
assert check_same_shape(A_ )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
_UpperCAmelCase : Dict = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
_UpperCAmelCase : Dict = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
_UpperCAmelCase : List[str] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
_UpperCAmelCase : int = pipe(A_ , generator=A_ , output_type="latent" ).images
_UpperCAmelCase : List[Any] = upscaler(
prompt=A_ , image=A_ , num_inference_steps=20 , guidance_scale=0 , generator=A_ , output_type="np" , ).images[0]
_UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = torch.manual_seed(33 )
_UpperCAmelCase : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
_UpperCAmelCase : str = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
_UpperCAmelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
_UpperCAmelCase : int = upscaler(
prompt=A_ , image=A_ , num_inference_steps=20 , guidance_scale=0 , generator=A_ , output_type="np" , ).images[0]
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 467
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path: str, tgt_path: str, save_path: str = None, **rouge_kwargs) -> dict:
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
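    # Hypothetical invocation (a sketch; the file names are assumptions):
    #   python calculate_rouge_path.py preds.txt refs.txt --save_path=rouge.json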
| 467
| 1
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCamelCase : Optional[Any] = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    # Build the reversible byte -> unicode-character map used by byte-level BPE.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
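# Minimal illustration (added note, not from the source):
#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}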
class A__ ( A__ ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ['input_ids', 'attention_mask']
def __init__( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict="replace" , lowerCamelCase__ : int="<s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="</s>" , lowerCamelCase__ : Dict="<s>" , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Optional[int]="<pad>" , lowerCamelCase__ : Dict="<mask>" , lowerCamelCase__ : Optional[int]=False , **lowerCamelCase__ : int , ):
a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
a__ : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
a__ : Dict = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
a__ : Tuple = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding="utf-8" ) as vocab_handle:
a__ : List[str] = json.load(lowerCamelCase__ )
a__ : List[str] = {v: k for k, v in self.encoder.items()}
a__ : int = errors # how to handle errors in decoding
a__ : List[str] = bytes_to_unicode()
a__ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding="utf-8" ) as merges_handle:
a__ : List[str] = merges_handle.read().split("\n" )[1:-1]
a__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
a__ : Tuple = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
a__ : Union[str, Any] = {}
a__ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _UpperCamelCase( self : Dict ):
return len(self.encoder )
def _UpperCamelCase( self : Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] ):
if token in self.cache:
return self.cache[token]
a__ : str = tuple(lowerCamelCase__ )
a__ : Dict = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
a__ : List[str] = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
a__, a__ : Union[str, Any] = bigram
a__ : Any = []
a__ : Union[str, Any] = 0
while i < len(lowerCamelCase__ ):
try:
a__ : Dict = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a__ : List[str] = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ : Tuple = tuple(lowerCamelCase__ )
a__ : Union[str, Any] = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
a__ : str = get_pairs(lowerCamelCase__ )
a__ : Union[str, Any] = " ".join(lowerCamelCase__ )
a__ : Union[str, Any] = word
return word
def _UpperCamelCase( self : int , lowerCamelCase__ : Tuple ):
a__ : Optional[int] = []
for token in re.findall(self.pat , lowerCamelCase__ ):
a__ : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(" " ) )
return bpe_tokens
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ):
return self.decoder.get(lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Any ):
a__ : Optional[Any] = "".join(lowerCamelCase__ )
a__ : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ : Dict = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
a__ : List[Any] = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + "\n" )
a__ : Dict = 0
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
a__ : List[Any] = token_index
writer.write(" ".join(lowerCamelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ : str = [self.cls_token_id]
a__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : int = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]=False , **lowerCamelCase__ : List[Any] ):
a__ : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
a__ : List[Any] = " " + text
return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
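    # A minimal sketch (hypothetical values, not taken from the original file) of the
    # behaviour implemented above: when a batch is padded, `global_attention_mask`
    # is extended with -1 rather than 0, because 0 already means "local attention".
    #
    #   enc = {"input_ids": [5, 6, 7, 1, 1], "global_attention_mask": [1, 0, 0]}
    #   # after _pad(enc, ...) with padding_side == "right":
    #   enc["global_attention_mask"] == [1, 0, 0, -1, -1]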
| 37
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
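# Illustration (added, hypothetical session) of what the lazy module above buys:
# importing the package is cheap, and the heavy torch/tf/flax imports only run when
# one of the exported names is first accessed.
#
#   from transformers.models import opt   # fast: nothing heavy imported yet
#   opt.OPTConfig                          # now configuration_opt is actually loaded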
| 437
| 0
|
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
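    # Known small case (an added check, not part of the original): 13195 = 5*7*13*29,
    # so its largest prime factor is 29.
    assert solution(13195) == 29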
| 702
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_dir = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_dir, ignore_files=ignore_files, only_modules=False)
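
# A standalone sketch (added for illustration, not part of the suite) of the core
# mechanism above: doctest collects the ``>>>`` examples from a docstring and
# verifies their printed output, which is what DocTestSuite does per module.
def _doctest_demo_square(x):
    """
    >>> _doctest_demo_square(3)
    9
    """
    return x * x


if __name__ == "__main__":
    import doctest

    assert doctest.testmod().failed == 0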
| 372
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """
    Incrementally yields the primes 2, 3, 5, ... with a lazy sieve: each known
    prime is stored in factor_map under its next multiple still to be crossed off.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """
    Returns the smallest n for which the prime square remainder 2 * prime * n
    first exceeds `limit`; even n are skipped because their remainder is 2.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
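    # Sanity check (added, not in the original): the lazy sieve must reproduce the
    # first primes in order.
    from itertools import islice

    assert list(islice(sieve(), 6)) == [2, 3, 5, 7, 11, 13]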
| 638
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the surface area of a hemisphere (curved surface plus base)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle from its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with `sides` sides of length `length`."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
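    # Spot checks (added assertions, not in the original demo): a 3-4-5 right
    # triangle has area 6 by Heron's formula, and a square is a special rectangle.
    assert area_triangle_three_sides(3, 4, 5) == 6.0
    assert area_square(5) == area_rectangle(5, 5)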
| 188
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
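    # The scheduler-swap pattern the fast tests above rely on, shown in isolation
    # (illustrative only, using the names already imported in this file): any
    # scheduler can be rebuilt from the current scheduler's config and assigned back.
    #
    #   pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)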
| 708
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image):
    """Return the MD5 hex digest of an image's raw bytes (used to compare depth maps)."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
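    # Quick illustration (added, hypothetical values) of the hashimage helper above:
    # byte-identical images hash identically, which lets the slow test compare a
    # generated depth map against a stored reference digest.
    #
    #   img = Image.new("L", (4, 4), color=0)
    #   assert hashimage(img) == hashimage(img.copy())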
| 645
| 0
|