"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Optional[Any] = "Speech2TextFeatureExtractor"
_lowerCamelCase :Optional[int] = "Speech2TextTokenizer"
def __init__( self : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
super().__init__(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : int = self.feature_extractor
lowerCAmelCase__ : Any = False
def __call__( self : List[str] , *UpperCamelCase : int , **UpperCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase , **UpperCamelCase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
lowerCAmelCase__ : str = kwargs.pop("""raw_speech""" )
else:
lowerCAmelCase__ : int = kwargs.pop("""audio""" , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = kwargs.pop("""sampling_rate""" , UpperCamelCase )
lowerCAmelCase__ : Tuple = kwargs.pop("""text""" , UpperCamelCase )
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : Optional[int] = args[0]
lowerCAmelCase__ : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
lowerCAmelCase__ : str = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase )
if text is not None:
lowerCAmelCase__ : Optional[int] = self.tokenizer(UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase__ : Dict = encodings["""input_ids"""]
return inputs
def _lowerCAmelCase ( self : str , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@contextmanager
def _lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[str] = self.tokenizer
yield
lowerCAmelCase__ : List[Any] = self.feature_extractor
lowerCAmelCase__ : Any = False
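
# --- usage sketch (not part of the original module) ---------------------------
# A minimal example of driving the processor above. The checkpoint name
# "facebook/s2t-small-librispeech-asr" and the silent 16 kHz waveform are
# illustrative assumptions only.
#
#     import numpy as np
#     from transformers import Speech2TextProcessor
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     # Passing `text=` prepares labels in the same call, which is what the
#     # deprecated `as_target_processor` context manager is replaced by.
#     batch = processor(audio=speech, sampling_rate=16_000, text="hello world")
#     # `batch` typically holds input_features, attention_mask, and labels.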
"""simple docstring"""
import math
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowerCAmelCase__ : Any = len(__UpperCAmelCase )
lowerCAmelCase__ : int = int(math.floor(math.sqrt(__UpperCAmelCase ) ) )
lowerCAmelCase__ : Optional[int] = 0
while arr[min(__UpperCAmelCase , __UpperCAmelCase ) - 1] < x:
lowerCAmelCase__ : Any = step
step += int(math.floor(math.sqrt(__UpperCAmelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCAmelCase__ : List[Any] = prev + 1
if prev == min(__UpperCAmelCase , __UpperCAmelCase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
_A = input("""Enter numbers separated by a comma:\n""").strip()
_A = [int(item) for item in user_input.split(""",""")]
_A = int(input("""Enter the number to be searched:\n"""))
_A = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
from ..utils import DummyObject, requires_backends


# Dummy placeholder that raises an informative error when `note_seq` is missing.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
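
# --- behaviour sketch (not part of the original module) ------------------------
# `DummyObject` placeholders like the one above let the package import cleanly
# even when an optional backend is absent, and fail lazily instead: any attempt
# to instantiate the class (or call `from_config`/`from_pretrained`) raises an
# error telling the user to install the missing backend. Roughly:
#
#     processor = MidiProcessor()
#     # -> ImportError-style message saying MidiProcessor requires the
#     #    note_seq library, which was not found in the environment.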
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
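
# --- usage sketch (not part of the original module) ----------------------------
# Constructing the config standalone; the keyword values shown are the defaults
# above, repeated only for illustration.
#
#     from transformers import WhisperConfig
#
#     config = WhisperConfig(d_model=256, encoder_layers=6, decoder_layers=6)
#     # `attribute_map` aliases generic names onto Whisper-specific ones:
#     assert config.hidden_size == config.d_model
#     assert config.num_attention_heads == config.encoder_attention_heads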
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a parameter key from the original YOSO checkpoint to its transformers name."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        # Pooler and sentence-classification heads are not part of YosoForMaskedLM.
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        # Keep only the first top-level `print` statement and `def` block.
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
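
# --- usage sketch (not part of the original module) ----------------------------
# `truncate_before_pattern` is the CodeGen-specific addition to `decode`:
# generated text is cut at the first match of any of the given regexes. The
# checkpoint name is the one referenced in the file's URL maps above; the
# patterns are illustrative.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer("def hello():", return_tensors="pt").input_ids
#     text = tokenizer.decode(ids[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])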
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at `x0` using Neville's iterated
    interpolation over the sample points `(x_points[i], y_points[i])`.
    Returns the interpolated value together with the full table of
    intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
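
# --- usage sketch (not part of the original file) -------------------------------
# Interpolating through five points of the line y = x + 5 and evaluating at 5;
# since the points are collinear, the expected value is 10.0. The sample data
# is chosen here purely for illustration.
#
#     value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
#     print(value)  # 10.0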
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
def split(string: str, separator: str = " ") -> list:
    """Split `string` on `separator` without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
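
# --- usage sketch (not part of the original file) -------------------------------
# Example inputs chosen for illustration:
#
#     split("apple#banana#cherry#orange", separator="#")
#     # -> ['apple', 'banana', 'cherry', 'orange']
#
#     split("Hello there")
#     # -> ['Hello', 'there']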
from __future__ import annotations


class Node:
    """A binary tree node holding an integer payload."""

    def __init__(self, data: int):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """A tree is full if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indent of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a `key` function to lower-case its result and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; if `check_only=True`, just check if the file could be sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore the beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reordering the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
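
# --- usage sketch (not part of the original script) -----------------------------
# `sort_objects_in_import` normalizes a single `_import_structure` entry; the
# sample statement below is illustrative. Constants sort first, then classes,
# then functions, each group alphabetically with underscores ignored.
#
#     stmt = '_import_structure["models.bert"] = ["load_tf_weights", "BertModel", "BERT_CONSTANT"]'
#     print(sort_objects_in_import(stmt))
#     # _import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_tf_weights"]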
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config


def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict


def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__A = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__A = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It\'s like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It\'s like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__A = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 164
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
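
# Note on the toy fixture above: "lower" is first split into characters
# ("l o w e r</w>"), then the merge rules collapse adjacent pairs in file
# order ("l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>"), yielding
# ["low", "er</w>"] — exactly what test_full_tokenizer asserts. The trailing
# numbers in the merges file ("l o 123") appear to be frequency counts and
# are not used as ranks by the tokenizer.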
| 283
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Tuple = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
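
# For reference, the lazy-import trick used above can be reproduced in a few
# lines with PEP 562 module-level __getattr__. This is a simplified sketch,
# not the actual transformers._LazyModule implementation:
#
#     import importlib
#
#     _import_structure = {"tokenization_mobilebert": ["MobileBertTokenizer"]}
#     _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
#
#     def __getattr__(name):
#         if name in _attr_to_module:
#             module = importlib.import_module("." + _attr_to_module[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")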
| 360
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance * variance_noise
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp() * variance_noise
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
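
# A minimal smoke-test sketch for the scheduler above (commented out because
# this module uses relative imports). The "denoiser output" here is random
# noise standing in for a real UnCLIP model:
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = torch.randn(1, 3, 64, 64)  # stand-in for the denoiser
#         sample = scheduler.step(model_output, t, sample).prev_sample
#     print(sample.shape)  # torch.Size([1, 3, 64, 64])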
| 138
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 137
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 137
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into several smaller sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
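
# A minimal sketch of how `batch_sequences` is meant to be wired up as a
# DataLoader collate function (the `params` namespace and `data` list here
# are illustrative, not defined in this module):
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params, data)
#     loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#     for token_ids, lengths in loader:
#         ...  # token_ids: (bs, max_seq_len_), lengths: (bs,)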
| 365
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236
| 0
|
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 143
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
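
# Illustrative usage sketch (the checkpoint name is an example, not pinned by
# this file): the processor pairs text queries with images for zero-shot
# detection and returns input_ids, attention_mask and pixel_values.
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )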
| 167
| 0
|
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = range(2, 2_0 + 1)
SCREAMING_SNAKE_CASE_ : Dict = [1_0**k for k in range(ks[-1] + 1)]
SCREAMING_SNAKE_CASE_ : dict[int, dict[int, list[list[int]]]] = {}
def _snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
A__ = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
A__ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
A__ , A__ = 0, 0
A__ = n - i
A__ = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
A__ = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
A__ = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ = _k
break
if max_jump >= 0:
A__ , A__ , A__ = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
A__ , A__ = divmod(UpperCAmelCase_ , 10 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
A__ = []
else:
A__ = {c: []}
A__ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
A__ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ):
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ = i
A__ , A__ , A__ = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ = ds_c + ds_b
diff += addend
A__ = 0
for j in range(UpperCAmelCase_ ):
A__ = a_i[j] + addend
A__ , A__ = divmod(UpperCAmelCase_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] ):
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
A__ = digits[j] + addend
if s >= 10:
A__ , A__ = divmod(UpperCAmelCase_ , 10 )
A__ = addend // 10 + quotient
else:
A__ = s
A__ = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ = divmod(UpperCAmelCase_ , 10 )
digits.append(UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : int = 10**15 ):
A__ = [1]
A__ = 1
A__ = 0
while True:
A__ , A__ = next_term(UpperCAmelCase_ , 20 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
A__ = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 69
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 69
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
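# --- Added usage sketch (not part of the original file): a minimal check of the
# hybrid branch above. It assumes only the class in this module and its
# `transformers` imports; the printed values follow from the defaults.
if __name__ == "__main__":
    hybrid_config = DPTConfig(is_hybrid=True)
    print(type(hybrid_config.backbone_config).__name__)  # BitConfig built from the default dict
    print(hybrid_config.to_dict()["model_type"])  # "dpt"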
| 53
|
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Saves the current best response ratio and the location of the
        # process being performed.
        response_ratio = 0
        loc = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
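# --- Added illustration: the scheduling decision above ranks ready processes by
# response ratio = (burst + waiting) / burst, so short jobs that have waited a
# long time win. A standalone sanity check of the formula (no numpy needed):
def response_ratio(burst: float, arrival: float, now: float) -> float:
    return (burst + (now - arrival)) / burst


assert response_ratio(burst=4, arrival=0, now=4) == 2.0  # waited one full burst
assert response_ratio(burst=2, arrival=3, now=4) == 1.5  # waited half a burst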
| 160
| 0
|
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
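# --- Added usage sketch (not part of the original file). Requires `transformers`
# and network access; the checkpoint name is an assumption for illustration only.
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="np")
    print(inputs["input_features"].shape)  # log-mel features for the encoder
    labels = processor(text="hello world")  # text alone is routed to the tokenizer
    print(labels["input_ids"])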
| 285
|
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> new_list.is_empty()
    True
    >>> new_list.insert(10)
    >>> str(new_list)
    '10'
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
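# --- Added usage sketch: exercises the doubly linked list implemented above
# (call _demo() manually; it is not run on import).
def _demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert str(linked_list) == "1 2 3"
    assert 2 in linked_list
    linked_list.delete_value(2)
    assert str(linked_list) == "1 3"
    assert linked_list.get_head_data() == 1
    assert linked_list.get_tail_data() == 3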
| 285
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class _UpperCAmelCase ( unittest.TestCase):
@slow
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = """bert-base-cased"""
_UpperCAmelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : int = TFAutoModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = """bert-base-cased"""
_UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : List[str] = TFAutoModelForPreTraining.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> str:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
@require_tensorflow_probability
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : str = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def __snake_case ( self ) -> Any:
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = copy.deepcopy(model.config )
_UpperCAmelCase : Dict = ["""FunnelBaseModel"""]
_UpperCAmelCase : Any = TFAutoModel.from_config(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
def __snake_case ( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
_UpperCAmelCase : int = TFAutoModel.from_pretrained("""bert-base""" )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
_UpperCAmelCase : Any = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(__lowerCAmelCase , """Use `from_pt=True` to load this model""" ):
_UpperCAmelCase : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_UpperCAmelCase : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_UpperCAmelCase : int = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
_UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
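# --- Added sketch: the custom-model registration pattern exercised earlier in
# this test file, shown in isolation. `MyConfig` / "my-model" are illustrative
# names, not part of the library; requires `transformers` to run.
def _registration_demo() -> None:
    from transformers import AutoConfig, PretrainedConfig

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    AutoConfig.register("my-model", MyConfig)
    assert isinstance(AutoConfig.for_model("my-model"), MyConfig)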
| 246
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
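# --- Added illustration: the `do_flip_channel_order` option tested above swaps
# RGB to BGR. A minimal numpy equivalent of that single step (channels-last
# layout assumed):
def _flip_channel_order(image: np.ndarray) -> np.ndarray:
    return image[..., ::-1]


_rgb = np.arange(12).reshape(2, 2, 3)
assert np.array_equal(_flip_channel_order(_rgb)[0, 0], _rgb[0, 0][::-1])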
| 289
| 0
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(number) for number in a))


if __name__ == "__main__":
    main()
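# --- Added check: pigeonhole sort runs in O(n + range) by counting values into
# holes, so it should always agree with Python's built-in sorted().
def _cross_check() -> None:
    import random

    data = [random.randint(-50, 50) for _ in range(200)]
    expected = sorted(data)
    pigeonhole_sort(data)  # sorts in place
    assert data == expected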
| 225
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
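# --- Added sketch (not part of the original file): _LazyModule defers the heavy
# imports above until an attribute is first touched. A minimal standalone
# equivalent built on PEP 562's module-level __getattr__; all names below are
# illustrative assumptions.
import importlib
import types


def make_lazy_module(name, import_structure):
    module = types.ModuleType(name)
    symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(symbol):
        if symbol not in symbol_to_module:
            raise AttributeError(symbol)
        submodule = importlib.import_module(symbol_to_module[symbol])  # imported on first use
        return getattr(submodule, symbol)

    module.__getattr__ = __getattr__  # PEP 562 hook, consulted on failed lookups
    return module


_lazy_json = make_lazy_module("lazy_json", {"json": ["dumps", "loads"]})
assert _lazy_json.dumps({"a": 1}) == '{"a": 1}'  # json is only imported here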
| 225
| 1
|
def trapezoidal_rule(boundary, steps):
    """Composite trapezoidal rule: h/2 * f(a) + h * sum(f(x_i)) + h/2 * f(b)."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # yields the interior sample points between a and b
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
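# --- Added check (not part of the original file): for f(x) = x**2 on [0, 1]
# the exact integral is 1/3, so the estimate should land close and improve as
# the step count grows.
def _convergence_check():
    exact = 1.0 / 3.0
    for steps in (10.0, 100.0, 1000.0):
        approx = trapezoidal_rule([0.0, 1.0], steps)
        print(f"steps={steps:6.0f}  estimate={approx:.6f}  error={abs(approx - exact):.2e}")
    assert abs(trapezoidal_rule([0.0, 1.0], 1000.0) - exact) < 5e-3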
| 116
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose number of contained rectangles is closest
    to ``target``. A grid of width a and height b contains T(a) * T(b)
    rectangles, where T(k) = k * (k + 1) / 2 is the k-th triangle number.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested_with_num_proc(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
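# --- Added illustration: map_nested applies a function through nested lists and
# dicts, which is what the assertions above rely on. A minimal pure-Python
# equivalent (assumption: only lists and dicts need handling):
def map_nested_simple(fn, data):
    if isinstance(data, dict):
        return {key: map_nested_simple(fn, value) for key, value in data.items()}
    if isinstance(data, list):
        return [map_nested_simple(fn, item) for item in data]
    return fn(data)


assert map_nested_simple(lambda i: i + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}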
| 350
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def lowerCamelCase__ ( lowercase , lowercase = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : List[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : int = 2
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Any = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE : Any = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
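# --- Added sketch (not part of the original script): the core of gradient
# accumulation without the Accelerate wrapper -- scale each mini-batch loss by
# 1/N so that N accumulated backward passes approximate one large-batch step,
# and only step/zero the optimizer every N batches. Plain PyTorch; `model` and
# `batches` are stand-ins supplied by the caller.
def train_with_accumulation(model, batches, accumulation_steps=4):
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(batches):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        (loss / accumulation_steps).backward()  # gradients add up in .grad
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()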
| 319
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
])
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : List[str] )->int:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , '''env''' )
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[int]=1 )->int:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'{self.env.base_job_name}-single' , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Union[str, Any] )->Dict:
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
def lowercase__ ( self : Dict )->str:
# create estimator
_UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _SCREAMING_SNAKE_CASE )
| 260
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _a ( _lowercase):
_a : Any = VOCAB_FILES_NAMES
_a : List[str] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _a ( _lowercase):
_a : int = VOCAB_FILES_NAMES
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Any = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_lowercase)
class _a :
def __call__( self : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , **_SCREAMING_SNAKE_CASE : str , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
elif titles is None or texts is None:
lowerCAmelCase__ : Tuple = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : int = titles if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [titles]
lowerCAmelCase__ : Dict = texts if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [texts]
lowerCAmelCase__ : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = questions if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
                F'There should be as many titles as texts, but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.' )
lowerCAmelCase__ : Union[str, Any] = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
lowerCAmelCase__ : str = super().__call__(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
lowerCAmelCase__ : Dict = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
lowerCAmelCase__ : Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase__ : Tuple = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : BatchEncoding , _SCREAMING_SNAKE_CASE : DPRReaderOutput , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : int = 64 , _SCREAMING_SNAKE_CASE : int = 4 , )-> List[DPRSpanPrediction]:
lowerCAmelCase__ : Optional[int] = reader_input['''input_ids''']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reader_output[:3]
lowerCAmelCase__ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = sorted(range(_SCREAMING_SNAKE_CASE ) , reverse=_SCREAMING_SNAKE_CASE , key=relevance_logits.__getitem__ )
lowerCAmelCase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowerCAmelCase__ : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase__ : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase__ : Any = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase__ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_SCREAMING_SNAKE_CASE , top_spans=_SCREAMING_SNAKE_CASE , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_SCREAMING_SNAKE_CASE , start_index=_SCREAMING_SNAKE_CASE , end_index=_SCREAMING_SNAKE_CASE , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , )-> List[DPRSpanPrediction]:
lowerCAmelCase__ : Union[str, Any] = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase__ : List[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] , reverse=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
lowerCAmelCase__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
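# A hedged usage sketch (not part of the classes above): how a DPR reader tokenizer and
# `decode_best_spans` fit together, assuming the standard upstream names and checkpoint.
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(best_spans[0].text)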
@add_end_docstrings(_lowercase)
class _a ( _lowercase , _lowercase):
_a : List[str] = VOCAB_FILES_NAMES
_a : str = READER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
_a : Optional[int] = ['''input_ids''', '''attention_mask''']
| 131
| 0
|
def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pythen")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ : List[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Tuple ={
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """focalnet"""
def __init__( self , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=4 , lowerCAmelCase__=3 , lowerCAmelCase__=9_6 , lowerCAmelCase__=False , lowerCAmelCase__=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , lowerCAmelCase__=[2, 2, 6, 2] , lowerCAmelCase__=[2, 2, 2, 2] , lowerCAmelCase__=[3, 3, 3, 3] , lowerCAmelCase__="gelu" , lowerCAmelCase__=4.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=False , lowerCAmelCase__=1E-4 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=3_2 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : str = patch_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE_ : Any = use_conv_embed
SCREAMING_SNAKE_CASE_ : Dict = hidden_sizes
SCREAMING_SNAKE_CASE_ : Any = depths
SCREAMING_SNAKE_CASE_ : Optional[Any] = focal_levels
SCREAMING_SNAKE_CASE_ : Any = focal_windows
SCREAMING_SNAKE_CASE_ : Tuple = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = mlp_ratio
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE_ : List[Any] = use_layerscale
SCREAMING_SNAKE_CASE_ : List[Any] = layerscale_value
SCREAMING_SNAKE_CASE_ : List[str] = use_post_layernorm
SCREAMING_SNAKE_CASE_ : Optional[int] = use_post_layernorm_in_modulation
SCREAMING_SNAKE_CASE_ : str = normalize_modulator
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : str = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Dict = encoder_stride
SCREAMING_SNAKE_CASE_ : Dict = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
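# A hedged usage sketch of the config above, assuming the upstream export names
# `FocalNetConfig` and `FocalNetModel` (both assumed here):
#
#     config = FocalNetConfig()          # focalnet-tiny style defaults, see kwargs above
#     print(config.stage_names)          # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     model = FocalNetModel(config)      # hypothetical companion model class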
| 162
| 0
|
def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place with odd-even transposition sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until a full pass makes no swaps, keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
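# Worked example: odd_even_sort([4, 1, 3, 2]) returns [1, 2, 3, 4] (the list is also sorted
# in place). Each outer pass compare-swaps even index pairs, then odd index pairs, so at most
# len(input_list) passes are needed.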
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 273
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested float list with the requested 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
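# Example (hedged): floats_list((2, 3)) -> a 2x3 nested list of floats drawn uniformly
# from [0, scale) using the module-level `global_rng` unless another rng is passed.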
@require_torch
class A_ (unittest.TestCase ):
def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = min_seq_length
UpperCAmelCase = max_seq_length
UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase = feature_size
UpperCAmelCase = padding_value
UpperCAmelCase = sampling_rate
UpperCAmelCase = do_normalize
UpperCAmelCase = num_mel_bins
UpperCAmelCase = hop_length
UpperCAmelCase = win_length
UpperCAmelCase = win_function
UpperCAmelCase = fmin
UpperCAmelCase = fmax
UpperCAmelCase = mel_floor
UpperCAmelCase = return_attention_mask
def _lowercase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
if equal_length:
UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self , _A ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def test_double_precision_pad(self):
        """Padding should downcast float64 inputs to float32 for both numpy and torch."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(_A )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowercase ( self , _A ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowercase ( self ):
'''simple docstring'''
        # fmt: off
        UpperCAmelCase = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )
def _lowercase ( self ):
'''simple docstring'''
        # fmt: off
        UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273
| 1
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowercase_ = {
"""facebook/blenderbot_small-90M""": 512,
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : int = BlenderbotSmallTokenizer
def __init__( self : Any , a : List[Any]=None , a : Tuple=None , a : List[str]="<|endoftext|>" , a : List[Any]="<|endoftext|>" , a : Optional[Any]="<|endoftext|>" , a : Tuple=False , a : Optional[Any]=True , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a , merges=a , add_prefix_space=a , trim_offsets=a , ) , bos_token=a , eos_token=a , unk_token=a , **a , )
lowercase__ = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add the BOS/EOS special tokens around one or two sequences."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Blenderbot small does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
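# Hedged usage sketch (checkpoint name taken from the pretrained map above; the class is
# assumed to be the fast Blenderbot small tokenizer, exported upstream as
# `BlenderbotSmallTokenizerFast`):
#
#     tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("sam i am").input_ids
#     print(tokenizer.decode(ids))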
| 269
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """spiece.model"""}
lowercase_ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowercase_ = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , a : Tuple , a : Optional[int]=False , a : str=False , a : str=False , a : Tuple=None , a : Any=None , a : Union[str, Any]=None , a : Union[str, Any]=None , a : Optional[Dict[str, Any]] = None , **a : Optional[int] , )-> None:
"""simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' if you are testing the model, this can safely be ignored' )
            name_or_path = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowercase__ = '<|endoftext|>' if eos_token is None else eos_token
lowercase__ = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowercase__ = unk_token if pad_token is None else pad_token
lowercase__ = eos_token if bos_token is None else bos_token
else:
lowercase__ = '<pad>' if pad_token is None else pad_token
lowercase__ = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , pad_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
def __getstate__( self : Any )-> str:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : int , a : Optional[Any] )-> int:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str )-> str:
"""simple docstring"""
lowercase__ = self.non_printing_characters_re.sub('' , a )
# Normalize whitespaces
lowercase__ = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
lowercase__ = unicodedata.normalize('NFC' , a )
return text
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , **a : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = self.preprocess_text(a )
return self.sp_model.encode(a , out_type=a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> int:
"""simple docstring"""
return self.sp_model.PieceToId(a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> str:
"""simple docstring"""
return self.sp_model.IdToPiece(a )
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string unchanged; overridden to disable the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens (pieces) back into a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict[str, int]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Union[str, List[str]] , a : Union[str, bool] = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(a , a ):
lowercase__ = self.preprocess_text(a )
lowercase__ = self.sp_model.encode(a )
else:
lowercase__ = [self.preprocess_text(a ) for t in text]
lowercase__ = self.sp_model.encode(a )
if return_tensors is True or return_tensors == "pt":
lowercase__ = torch.tensor(a )
return token_ids
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[int, List[int]] )-> str:
"""simple docstring"""
return self.sp_model.decode(a )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : "Conversation" )-> List[int]:
"""simple docstring"""
lowercase__ = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowercase__ = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(a ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=a )
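# Hedged usage sketch (checkpoint name taken from the pretrained map above; the class is
# exported upstream as `GPTSw3Tokenizer`, assumed here):
#
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer("Träd är fina").input_ids
#     print(tokenizer.decode(ids))   # whitespace is NFC-normalized by preprocess_text first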
| 269
| 1
|
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Evaluate the ``order``-th derivative of ``func`` at ``position`` using dual numbers."""
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
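# Worked check: f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and
# f''(9) = 30 * 9**4 = 30 * 6561 = 196830, which is what the call above should print.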
| 230
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A__ = logging.get_logger(__name__)
A__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
A__ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
A__ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[Any] = RealmTokenizer
def __init__( self :Optional[Any] ,__lowercase :Dict=None ,__lowercase :Optional[int]=None ,__lowercase :Optional[Any]=True ,__lowercase :Optional[int]="[UNK]" ,__lowercase :List[str]="[SEP]" ,__lowercase :List[str]="[PAD]" ,__lowercase :int="[CLS]" ,__lowercase :str="[MASK]" ,__lowercase :Dict=True ,__lowercase :List[str]=None ,**__lowercase :Any ,):
super().__init__(
__lowercase ,tokenizer_file=__lowercase ,do_lower_case=__lowercase ,unk_token=__lowercase ,sep_token=__lowercase ,pad_token=__lowercase ,cls_token=__lowercase ,mask_token=__lowercase ,tokenize_chinese_chars=__lowercase ,strip_accents=__lowercase ,**__lowercase ,)
snake_case__ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,__lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,__lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,__lowercase ) != tokenize_chinese_chars
):
snake_case__ : Optional[int] = getattr(__lowercase ,normalizer_state.pop('''type''' ) )
snake_case__ : List[Any] = do_lower_case
snake_case__ : Optional[Any] = strip_accents
snake_case__ : List[str] = tokenize_chinese_chars
snake_case__ : Dict = normalizer_class(**__lowercase )
snake_case__ : Tuple = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Candidates must all share one shape, so padding to ``max_length`` is forced.
        kwargs['''padding'''] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('''text_pair''' , None )
        return_tensors = kwargs.pop('''return_tensors''' , None )
        output_data = {
            '''input_ids''': [],
            '''attention_mask''': [],
            '''token_type_ids''': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('''input_ids''' )
            encoded_attention_mask = encoded_candidates.get('''attention_mask''' )
            encoded_token_type_ids = encoded_candidates.get('''token_type_ids''' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[str] = None ):
snake_case__ : Tuple = self._tokenizer.model.save(__lowercase ,name=__lowercase )
return tuple(__lowercase )
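# Hedged usage sketch of `batch_encode_candidates` above, mirroring the upstream docstring
# (class assumed to be exported as `RealmTokenizerFast`):
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     text = [["Hello world!", "Nice to meet you!"]]   # one example with two candidates
#     out = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#     # out.input_ids has shape (batch_size, num_candidates, max_length)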
| 230
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __A :
"""simple docstring"""
__lowerCAmelCase = PegasusConfig
__lowerCAmelCase = {}
__lowerCAmelCase = "gelu"
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=40 , __A=2 , __A=1 , __A=0 , ) -> Optional[int]:
a =parent
a =batch_size
a =seq_length
a =is_training
a =use_labels
a =vocab_size
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =max_position_embeddings
a =eos_token_id
a =pad_token_id
a =bos_token_id
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a =tf.concat([input_ids, eos_tensor] , axis=1 )
a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a =prepare_pegasus_inputs_dict(__A , __A , __A )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Union[str, Any]:
a =TFPegasusModel(config=__A ).get_decoder()
a =inputs_dict['''input_ids''']
a =input_ids[:1, :]
a =inputs_dict['''attention_mask'''][:1, :]
a =inputs_dict['''head_mask''']
a =1
# first forward pass
a =model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
a , a =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a =ids_tensor((self.batch_size, 3) , config.vocab_size )
        a =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
a =tf.concat([input_ids, next_tokens] , axis=-1 )
a =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a =model(__A , attention_mask=__A )[0]
a =model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a =output_from_no_past[:, -3:, random_slice_idx]
a =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1E-3 )
def _A ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
"""simple docstring"""
if attention_mask is None:
        a =tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
        a =tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
if head_mask is None:
a =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__lowerCAmelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__lowerCAmelCase = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =TFPegasusModelTester(self )
a =ConfigTester(self , config_class=__A )
def SCREAMING_SNAKE_CASE ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__lowerCAmelCase = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__lowerCAmelCase = "google/pegasus-xsum"
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def SCREAMING_SNAKE_CASE ( self , **__A ) -> List[Any]:
a =self.translate_src_text(**__A )
assert self.expected_text == generated_words
def SCREAMING_SNAKE_CASE ( self , **__A ) -> str:
a =self.tokenizer(self.src_text , **__A , padding=__A , return_tensors='''tf''' )
a =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
a =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )
return generated_words
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
self._assert_generated_batch_equal_expected()
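# Hedged note: the integration test above is gated by @slow; in a transformers checkout it is
# typically enabled with the RUN_SLOW=1 environment variable and downloads google/pegasus-xsum.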
| 215
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "WhisperFeatureExtractor"
__lowerCAmelCase = "WhisperTokenizer"
def __init__( self , __A , __A ) -> Dict:
super().__init__(__A , __A )
a =self.feature_extractor
a =False
def SCREAMING_SNAKE_CASE ( self , __A=None , __A=None , __A=True ) -> int:
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self , *__A , **__A ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
a =kwargs.pop('''audio''' , __A )
a =kwargs.pop('''sampling_rate''' , __A )
a =kwargs.pop('''text''' , __A )
if len(__A ) > 0:
a =args[0]
a =args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
a =self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
a =self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a =encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Union[str, Any]:
return self.tokenizer.decode(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A="np" ) -> Optional[Any]:
return self.tokenizer.get_prompt_ids(__A , return_tensors=__A )
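# Hedged usage sketch of the processor above (checkpoint name assumed; `waveform` and
# `predicted_ids` are placeholders for a 1-D float array and `model.generate(...)` output):
#
#     from transformers import WhisperProcessor
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#     text = processor.batch_decode(predicted_ids, skip_special_tokens=True)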
| 215
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample( wav , max_length , sample_rate = 16000 ):
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
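# Illustrative sketch (not part of the original script): clipping a 30 s
# waveform to a random 20 s window with the helper above.
#   wav = np.zeros(16000 * 30)            # fake mono audio at 16 kHz
#   clip = random_subsample(wav, max_length=20.0, sample_rate=16000)
#   assert len(clip) == 16000 * 20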
@dataclass
class SCREAMING_SNAKE_CASE_ :
__lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
__lowerCAmelCase = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to \'train\'"""
} , )
__lowerCAmelCase = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to \'validation\'"""
)
} , )
__lowerCAmelCase = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to \'audio\'"""} , )
__lowerCAmelCase = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to \'label\'"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__lowerCAmelCase = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__lowerCAmelCase = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
__lowerCAmelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__lowerCAmelCase = field(
default=__lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def lowercase( ) -> Tuple:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , UpperCamelCase_ , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase = DatasetDict()
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase = feature_extractor.model_input_names[0]
def train_transforms(UpperCamelCase_ ):
UpperCamelCase = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCamelCase_ )
UpperCamelCase = feature_extractor(UpperCamelCase_ , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase = {model_input_name: inputs.get(UpperCamelCase_ )}
UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCamelCase_ ):
UpperCamelCase = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase = feature_extractor(UpperCamelCase_ , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase = {model_input_name: inputs.get(UpperCamelCase_ )}
UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase = raw_datasets["train"].features[data_args.label_column_name].names
    UpperCamelCase , UpperCamelCase = {}, {}
for i, label in enumerate(UpperCamelCase_ ):
UpperCamelCase = str(UpperCamelCase_ )
UpperCamelCase = label
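    # Example of the resulting mappings (illustrative, added for clarity): for
    # labels ["dog", "cat"] this loop builds a label-to-id map {"dog": "0",
    # "cat": "1"} and its inverse {"0": "dog", "1": "cat"}; ids are stored as
    # strings because that is how the model config serializes them.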
# Load the accuracy metric from the datasets package
UpperCamelCase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase_ ):
UpperCamelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCamelCase_ , references=eval_pred.label_ids )
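    # Illustrative note (added for clarity): `eval_pred.predictions` holds raw
    # logits of shape (num_examples, num_labels); the argmax over axis 1 picks
    # the predicted class id before the accuracy metric compares it against
    # `eval_pred.label_ids`.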
UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCamelCase_ ) , labelaid=UpperCamelCase_ , idalabel=UpperCamelCase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCamelCase_ , output_all_columns=UpperCamelCase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCamelCase_ , output_all_columns=UpperCamelCase_ )
# Initialize our trainer
UpperCamelCase = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCamelCase_ )
trainer.save_metrics("""eval""" , UpperCamelCase_ )
# Write model card and (optionally) push to hub
UpperCamelCase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase_ )
else:
trainer.create_model_card(**UpperCamelCase_ )
if __name__ == "__main__":
main()
| 343
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Tuple = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class __lowerCAmelCase ( lowerCAmelCase):
_a = '''ernie_m'''
_a = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: List[Any] , _lowerCAmelCase: int = 25_00_02 , _lowerCAmelCase: int = 7_68 , _lowerCAmelCase: int = 12 , _lowerCAmelCase: int = 12 , _lowerCAmelCase: int = 30_72 , _lowerCAmelCase: str = "gelu" , _lowerCAmelCase: float = 0.1 , _lowerCAmelCase: float = 0.1 , _lowerCAmelCase: int = 5_14 , _lowerCAmelCase: float = 0.02 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: float = 1e-0_5 , _lowerCAmelCase: Dict=None , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: List[str]=0.0 , **_lowerCAmelCase: Tuple , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase :Tuple = vocab_size
lowercase :List[str] = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Optional[Any] = num_attention_heads
lowercase :Optional[Any] = intermediate_size
lowercase :Optional[Any] = hidden_act
lowercase :Any = hidden_dropout_prob
lowercase :int = attention_probs_dropout_prob
lowercase :Dict = max_position_embeddings
lowercase :Optional[Any] = initializer_range
lowercase :Any = layer_norm_eps
lowercase :Union[str, Any] = classifier_dropout
lowercase :int = is_decoder
lowercase :List[str] = act_dropout
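    # Illustrative note (not part of the original file): via `attribute_map`
    # above, PretrainedConfig transparently redirects `config.dropout` to
    # `config.classifier_dropout` and `config.num_classes` to
    # `config.num_labels`, so checkpoints written with either naming work.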
| 236
| 0
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Any = DebertaVaTokenizer
A_ : List[Any] = DebertaVaTokenizerFast
A_ : int = True
A_ : List[Any] = True
def a (self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = DebertaVaTokenizer(a__ , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def a (self : List[Any] , a__ : List[str] ):
"""simple docstring"""
__snake_case = '''this is a test'''
__snake_case = '''this is a test'''
return input_text, output_text
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''<pad>'''
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(a__ ) , 3_0001 )
def a (self : Dict ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def a (self : str ):
"""simple docstring"""
__snake_case = ''' \tHeLLo!how \n Are yoU? '''
__snake_case = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
__snake_case = DebertaVaTokenizer(a__ , do_lower_case=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = DebertaVaTokenizerFast(a__ , do_lower_case=a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def a (self : Any ):
"""simple docstring"""
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def a (self : Dict ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
__snake_case = DebertaVaTokenizer(a__ , split_by_punct=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = DebertaVaTokenizerFast(a__ , split_by_punct=a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def a (self : int ):
"""simple docstring"""
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
__snake_case = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
__snake_case = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
__snake_case = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ''' \tHeLLo!how \n Are yoU? '''
__snake_case = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
__snake_case = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def a (self : Any ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
__snake_case = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(a__ )
__snake_case = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
def a (self : str ):
"""simple docstring"""
__snake_case = '''This is a test'''
__snake_case = [13, 1, 4398, 25, 21, 1289]
__snake_case = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
__snake_case = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
__snake_case = DebertaVaTokenizer(a__ , keep_accents=a__ )
__snake_case = DebertaVaTokenizerFast(a__ , keep_accents=a__ )
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
__snake_case = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
# fmt: off
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
__snake_case = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
__snake_case = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
__snake_case = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = rust_tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = DebertaVaTokenizer(a__ )
__snake_case = tokenizer.encode('''sequence builders''' )
__snake_case = tokenizer.encode('''multi-sequence build''' )
__snake_case = tokenizer.build_inputs_with_special_tokens(a__ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a__ , )
@slow
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 238
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case , __snake_case = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case = controlnet_params
__snake_case = '''bird'''
__snake_case = jax.device_count()
__snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
__snake_case = pipe.prepare_image_inputs([canny_image] * num_samples )
__snake_case = jax.random.PRNGKey(0 )
__snake_case = jax.random.split(a__ , jax.device_count() )
__snake_case = replicate(a__ )
__snake_case = shard(a__ )
__snake_case = shard(a__ )
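        # Added for clarity: `replicate` copies the pipeline params to every
        # local device, while `shard` splits the leading batch axis so each
        # device receives its own slice; with `jit=True` below the call runs
        # under `jax.pmap`.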
__snake_case = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__snake_case = images[0, 253:256, 253:256, -1]
__snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__snake_case = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case , __snake_case = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case = controlnet_params
__snake_case = '''Chef in the kitchen'''
__snake_case = jax.device_count()
__snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
__snake_case = pipe.prepare_image_inputs([pose_image] * num_samples )
__snake_case = jax.random.PRNGKey(0 )
__snake_case = jax.random.split(a__ , jax.device_count() )
__snake_case = replicate(a__ )
__snake_case = shard(a__ )
__snake_case = shard(a__ )
__snake_case = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__snake_case = images[0, 253:256, 253:256, -1]
__snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__snake_case = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 238
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def lowerCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] ) ->Tuple:
_SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = downstream_dict["""projector.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""projector.bias"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.post_net.linear.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.post_net.linear.bias"""]
return model
def lowerCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ) ->List[str]:
_SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = downstream_dict["""model.linear.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.linear.bias"""]
return model
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ) ->Dict:
_SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = downstream_dict["""connector.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_SCREAMING_SNAKE_CASE = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
_SCREAMING_SNAKE_CASE = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""objective.W"""]
return model
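# Note added for clarity: each converter above is meant to copy the s3prl
# downstream-head weights (projector / classifier / TDNN kernels) onto the
# matching attributes of the freshly loaded HuggingFace model before it is
# returned by the @torch.no_grad() entry point below.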
@torch.no_grad()
def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) ->List[str]:
_SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = checkpoint["""Downstream"""]
_SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
__lowerCamelCase , return_attention_mask=__lowerCamelCase , do_normalize=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
_SCREAMING_SNAKE_CASE = convert_classification(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
_SCREAMING_SNAKE_CASE = convert_diarization(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
elif arch.endswith("""ForXVector""" ):
_SCREAMING_SNAKE_CASE = convert_xvector(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
_SCREAMING_SNAKE_CASE = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__lowerCamelCase )
hf_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowercase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 58
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def lowerCamelCase ( sentence : str ) ->str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
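# Illustrative examples (added; not doctests from the original file):
#   lowerCamelCase("hello world")  -> "Hello world"
#   lowerCamelCase("123 hello")    -> "123 hello"   (non-letters pass through)
#   lowerCamelCase("")             -> ""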
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __a ( module ) ->List[Any]:
    """simple docstring"""
    for param in module.parameters():
        param.requires_grad = False
def __a ( ) ->Tuple:
    """simple docstring"""
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device
def __a ( image ) ->List[str]:
    """simple docstring"""
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def __a ( ) ->Optional[int]:
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
| 368
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
        if kwargs.pop("""add_bos_token""" , False ):
            model_id = kwargs.pop("""name_or_path""" , """""" )
            raise ValueError(
                """Currently GPT2's fast tokenizer does NOT support adding a BOS token. """
                """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
                F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
                F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
                """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"""
                """ so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
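    # Illustrative trace (not from the original file) of the helper above,
    # invoked through its public call site `truncate`:
    #   completion = "x = 1\ny = 2\n\n\n# scratch"
    #   tokenizer.truncate(completion, ["\n\n\n"])  ->  "x = 1\ny = 2"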
| 337
| 0
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 231
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = embeddings_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_act
_snake_case = num_labels
_snake_case = scope
_snake_case = len(_lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Tuple ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
_snake_case = TFResNetModel(config=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ):
_snake_case = self.num_labels
_snake_case = TFResNetForImageClassification(_lowerCamelCase )
_snake_case = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__a = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : List[Any] ):
_snake_case = TFResNetModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowercase ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : List[Any] ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def lowercase ( self : Any ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def lowercase ( self : List[str] ):
pass
def lowercase ( self : int ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
_snake_case = model_class(_lowerCamelCase )
_snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def lowercase ( self : List[str] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFResNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img( ) -> Union[str, Any]:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
_snake_case = model(**_lowerCamelCase )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
| 288
| 0
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "isbn/0140328726" ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCAmelCase__ : List[str] = f"""{olid} is not a valid Open Library olid"""
raise ValueError(__UpperCAmelCase )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowerCAmelCase__ : Dict = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCAmelCase__ : Tuple = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowerCAmelCase__ : Optional[Any] = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Tuple = """, """.join(__UpperCAmelCase )
return data
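# Illustrative sketch (added; field values are examples, not fetched output):
#   raw = get_openlibrary_data("isbn/0140328726")   # raw Open Library JSON
#   summarize_book(raw) flattens it to something like
#   {"Title": "Matilda", "Authors": "Roald Dahl", "ISBN (13)": "...", ...}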
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 356
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left , right , array , target ):
    """simple docstring"""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array , target ):
    """simple docstring"""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
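# Worked example (added for clarity): searching a sorted list of 30 items.
# The first pass splits [0, 30] at one_third = (0 + 30) // 3 + 1 = 11 and
# two_third = 2 * (0 + 30) // 3 + 1 = 21; once the active range shrinks
# below `precision` (10) elements, the loop hands off to `lin_search`, so
# every query ends in a short linear scan.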
def rec_ternary_search( left , right , array , target ):
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(F"""Iterative search: {target} found at position: {resulta}""")
        print(F"""Recursive search: {target} found at position: {resultb}""")
else:
print('''Not found''')
| 184
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
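# Usage sketch (names illustrative): report the trainable size in millions.
# n_params = count_trainable_parameters(pl_module)
# print(f"{n_params / 1e6:.1f}M trainable parameters")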
lowerCAmelCase_ : int = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model checkpoint, ranked by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 63
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
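# Minimal usage sketch: `attribute_map` lets the generic config names resolve
# to the Transformer-XL ones, so with the defaults above:
# config = TransfoXLConfig()
# assert config.hidden_size == config.d_model  # both 1024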
| 221
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : int , A : str , A : Union[str, Any]=7 , A : Union[str, Any]=3 , A : Any=3_0 , A : int=4_0_0 , A : Dict=True , A : Optional[Any]=None , A : Union[str, Any]=True , A : Dict=[0.5, 0.5, 0.5] , A : List[Any]=[0.5, 0.5, 0.5] , A : Tuple=True , A : int=1 / 2_5_5 , A : Union[str, Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCAmelCase : Optional[Any] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
_UpperCAmelCase : Union[str, Any] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : List[Any] = num_channels
_UpperCAmelCase : int = min_resolution
_UpperCAmelCase : List[str] = max_resolution
_UpperCAmelCase : Tuple = do_resize
_UpperCAmelCase : List[str] = size
_UpperCAmelCase : List[Any] = do_normalize
_UpperCAmelCase : List[Any] = image_mean
_UpperCAmelCase : Dict = image_std
_UpperCAmelCase : Optional[int] = do_rescale
_UpperCAmelCase : List[Any] = rescale_factor
_UpperCAmelCase : Any = do_pad
def snake_case_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case_ ( self : List[str] , A : int , A : Optional[int]=False ):
if not batched:
_UpperCAmelCase : Union[str, Any] = image_inputs[0]
if isinstance(A , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = image.size
else:
_UpperCAmelCase , _UpperCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase : Tuple = int(self.size["shortest_edge"] * h / w )
_UpperCAmelCase : List[Any] = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase : List[Any] = self.size["shortest_edge"]
_UpperCAmelCase : List[str] = int(self.size["shortest_edge"] * w / h )
else:
_UpperCAmelCase : int = self.size["shortest_edge"]
_UpperCAmelCase : Dict = self.size["shortest_edge"]
else:
_UpperCAmelCase : str = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase : Optional[Any] = max(A , key=lambda A : item[0] )[0]
_UpperCAmelCase : Any = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DetaImageProcessor if is_vision_available() else None
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : int = DetaImageProcessingTester(self )
@property
def snake_case_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "image_mean" ) )
self.assertTrue(hasattr(A , "image_std" ) )
self.assertTrue(hasattr(A , "do_normalize" ) )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "do_rescale" ) )
self.assertTrue(hasattr(A , "do_pad" ) )
self.assertTrue(hasattr(A , "size" ) )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
def snake_case_ ( self : str ):
pass
def snake_case_ ( self : str ):
# Initialize image_processing
_UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(A , batched=A )
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Union[str, Any] ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase : int = image_processing(A , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Tuple ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase : List[str] = image_processing(A , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ ( self : Optional[int] ):
# prepare image and target
_UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_UpperCAmelCase : int = json.loads(f.read() )
_UpperCAmelCase : str = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
_UpperCAmelCase : Union[str, Any] = DetaImageProcessor()
_UpperCAmelCase : Optional[int] = image_processing(images=A , annotations=A , return_tensors="pt" )
# verify pixel values
_UpperCAmelCase : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , A )
_UpperCAmelCase : Optional[int] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
_UpperCAmelCase : Union[str, Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A ) )
# verify boxes
_UpperCAmelCase : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A )
_UpperCAmelCase : List[str] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A ) )
# verify is_crowd
_UpperCAmelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A ) )
# verify class_labels
_UpperCAmelCase : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A ) )
# verify orig_size
_UpperCAmelCase : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A ) )
# verify size
_UpperCAmelCase : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
# prepare image, target and masks_path
_UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_UpperCAmelCase : Dict = json.loads(f.read() )
_UpperCAmelCase : List[str] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
_UpperCAmelCase : List[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_UpperCAmelCase : List[str] = DetaImageProcessor(format="coco_panoptic" )
_UpperCAmelCase : int = image_processing(images=A , annotations=A , masks_path=A , return_tensors="pt" )
# verify pixel values
_UpperCAmelCase : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , A )
_UpperCAmelCase : Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
_UpperCAmelCase : Optional[int] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A ) )
# verify boxes
_UpperCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A )
_UpperCAmelCase : int = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A ) )
# verify is_crowd
_UpperCAmelCase : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A ) )
# verify class_labels
_UpperCAmelCase : str = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A ) )
# verify masks
_UpperCAmelCase : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A )
# verify orig_size
_UpperCAmelCase : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A ) )
# verify size
_UpperCAmelCase : int = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A ) )
| 202
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = GPTSanJapaneseTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def snake_case_ ( self : Any ):
super().setUp()
# fmt: off
_UpperCAmelCase : Any = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : Optional[int] = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_UpperCAmelCase : List[Any] = {"unk_token": "<unk>"}
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(A ) )
def snake_case_ ( self : int , **A : List[str] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : int , A : Any ):
_UpperCAmelCase : Optional[Any] = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_UpperCAmelCase : List[Any] = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def snake_case_ ( self : Optional[Any] , A : str ):
_UpperCAmelCase , _UpperCAmelCase : str = self.get_input_output_texts(A )
_UpperCAmelCase : List[Any] = tokenizer.encode(A , add_special_tokens=A )
_UpperCAmelCase : Union[str, Any] = tokenizer.decode(A , clean_up_tokenization_spaces=A )
return text, ids
def snake_case_ ( self : Any ):
pass # TODO add if relevant
def snake_case_ ( self : Union[str, Any] ):
pass # TODO add if relevant
def snake_case_ ( self : int ):
pass # TODO add if relevant
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[Any] = self.get_tokenizer()
# Testing tokenization
_UpperCAmelCase : Optional[int] = "こんにちは、世界。 こんばんは、㔺界。"
_UpperCAmelCase : Dict = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_UpperCAmelCase : List[Any] = tokenizer.tokenize(A )
self.assertListEqual(A , A )
# Testing conversion to ids without special tokens
_UpperCAmelCase : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , A )
# Testing conversion to ids with special tokens
_UpperCAmelCase : str = tokens + [tokenizer.unk_token]
_UpperCAmelCase : Any = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , A )
def snake_case_ ( self : Any ):
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
_UpperCAmelCase : Dict = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_UpperCAmelCase : Tuple = "こんにちは、、、、世界。こんばんは、、、、世界。"
_UpperCAmelCase : int = tokenizer.encode(A )
_UpperCAmelCase : Optional[Any] = tokenizer.decode(A )
self.assertEqual(A , A )
@slow
def snake_case_ ( self : Dict ):
_UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_UpperCAmelCase : List[Any] = "こんにちは、世界。"
_UpperCAmelCase : List[str] = "こんばんは、㔺界。😀"
_UpperCAmelCase : Any = "こんにちは、世界。こんばんは、世界。😀"
_UpperCAmelCase : Union[str, Any] = tokenizer.encode(prefix_text + input_text )
_UpperCAmelCase : Tuple = tokenizer.encode("" , prefix_text=prefix_text + input_text )
_UpperCAmelCase : Optional[int] = tokenizer.encode(A , prefix_text=A )
_UpperCAmelCase : Tuple = tokenizer.decode(A )
_UpperCAmelCase : Optional[Any] = tokenizer.decode(A )
_UpperCAmelCase : Tuple = tokenizer.decode(A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
@slow
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_UpperCAmelCase : Any = "こんにちは、世界。"
_UpperCAmelCase : List[Any] = "こんばんは、㔺界。😀"
_UpperCAmelCase : Optional[Any] = len(tokenizer.encode(A ) ) - 2
_UpperCAmelCase : List[Any] = len(tokenizer.encode(A ) ) - 2
_UpperCAmelCase : List[str] = [1] + [0] * (len_prefix + len_text + 1)
_UpperCAmelCase : str = [1] * (len_prefix + len_text + 1) + [0]
_UpperCAmelCase : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
_UpperCAmelCase : Any = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
_UpperCAmelCase : List[Any] = tokenizer(A , prefix_text=A ).token_type_ids
self.assertListEqual(A , A )
self.assertListEqual(A , A )
self.assertListEqual(A , A )
@slow
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : str = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_UpperCAmelCase : Dict = tokenizer.encode("あンいワ" )
_UpperCAmelCase : str = tokenizer.encode("" , prefix_text="あンいワ" )
_UpperCAmelCase : Dict = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(A ) , tokenizer.decode(A ) )
self.assertEqual(tokenizer.decode(A ) , tokenizer.decode(A ) )
self.assertNotEqual(A , A )
self.assertNotEqual(A , A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_UpperCAmelCase : Tuple = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_UpperCAmelCase : Tuple = tokenizer(A , padding=A )
_UpperCAmelCase : str = tokenizer.batch_encode_plus(A , padding=A )
# fmt: off
_UpperCAmelCase : str = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
_UpperCAmelCase : str = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_UpperCAmelCase : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , A )
self.assertListEqual(x_token.token_type_ids , A )
self.assertListEqual(x_token.attention_mask , A )
self.assertListEqual(x_token_a.input_ids , A )
self.assertListEqual(x_token_a.token_type_ids , A )
self.assertListEqual(x_token_a.attention_mask , A )
def snake_case_ ( self : List[Any] ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def snake_case_ ( self : int ):
# tokenizer has no padding token
pass
| 202
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
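# Example: floats_list((2, 3)) returns a 2x3 nested list of floats drawn
# uniformly from [0, 1); `scale` stretches the range and `name` is unused here.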
class __snake_case ( unittest.TestCase):
def __init__( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : str=4_0_0 , __lowerCAmelCase : Optional[Any]=2_0_0_0 , __lowerCAmelCase : str=1 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Dict=1_6_0_0_0 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : str=True , ):
"""simple docstring"""
_lowerCamelCase : int = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : str = min_seq_length
_lowerCamelCase : List[str] = max_seq_length
_lowerCamelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : List[Any] = feature_size
_lowerCamelCase : Dict = padding_value
_lowerCamelCase : int = sampling_rate
_lowerCamelCase : int = return_attention_mask
_lowerCamelCase : List[Any] = do_normalize
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Tuple=False ):
"""simple docstring"""
def _flatten(__lowerCAmelCase : int ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
_lowerCamelCase : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase : Optional[Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Union[str, Any] = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Tuple = WavaVecaFeatureExtractor
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = WavaVecaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(__lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : int = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
_lowerCamelCase : List[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_lowerCamelCase : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test batched
_lowerCamelCase : List[Any] = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
_lowerCamelCase : Optional[Any] = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase : Optional[int] = np.asarray(__lowerCAmelCase )
_lowerCamelCase : Tuple = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
_lowerCamelCase : Any = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : List[str] = ['''longest''', '''max_length''', '''do_not_pad''']
_lowerCamelCase : Tuple = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : str = feat_extract(__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = range(8_0_0 , 1_4_0_0 , 2_0_0 )
_lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in lengths]
_lowerCamelCase : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
_lowerCamelCase : str = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : Tuple = feat_extract(__lowerCAmelCase , max_length=__lowerCAmelCase , padding=__lowerCAmelCase )
_lowerCamelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : str = feat_extract(
__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
_lowerCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : Union[str, Any] = feat_extract(
__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_lowerCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
_lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : List[str] = feat_extract(
__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_lowerCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
import torch
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Tuple = np.random.rand(1_0_0 ).astype(np.floataa )
_lowerCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCamelCase : List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_lowerCamelCase : Optional[Any] = WavaVecaConfig.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 72
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 72
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
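# Usage sketch: the defaults above mirror google/pegasus-large, and the two
# properties alias the generic config names onto the encoder-specific ones:
# config = PegasusConfig()
# assert config.num_attention_heads == config.encoder_attention_heads  # 16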
| 350
|
def harmonic_series(n_term: str) -> list:
    """Returns the harmonic series up to the n-th term as a list of fraction strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(F'''1/{temp + 1}''' if series else '''1''')
    return series
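# Worked example, checked by hand: harmonic_series("3") -> ['1', '1/2', '1/3'];
# the first append hits the `else '1'` branch because `series` is still empty.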
if __name__ == "__main__":
nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 281
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=4 , ):
"""simple docstring"""
snake_case : int = parent
snake_case : List[Any] = batch_size
snake_case : str = seq_length
snake_case : Optional[int] = is_training
snake_case : Optional[int] = use_attention_mask
snake_case : str = use_token_type_ids
snake_case : int = use_labels
snake_case : Any = vocab_size
snake_case : Any = hidden_size
snake_case : Any = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Optional[Any] = intermediate_size
snake_case : List[str] = hidden_act
snake_case : Any = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : int = type_sequence_label_size
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = num_choices
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Tuple = None
if self.use_attention_mask:
snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : str = None
if self.use_token_type_ids:
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : str = config_and_inputs
snake_case : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( lowerCamelCase_ , unittest.TestCase ):
a__ : Optional[Any] = True
a__ : List[str] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case : List[Any] = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=SCREAMING_SNAKE_CASE )
snake_case : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
snake_case : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case : List[Any] = model(SCREAMING_SNAKE_CASE )[0]
snake_case : List[Any] = 50_000
snake_case : List[str] = (1, 6, vocab_size)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 148
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__A = "path-to-your-trained-model"
__A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__A = "A photo of sks dog in a bucket"
__A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 148
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__a = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
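# Minimal usage sketch (standard transformers generation API): criteria compose
# via StoppingCriteriaList, and generation stops as soon as any one fires.
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# outputs = model.generate(input_ids, stopping_criteria=criteria)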
| 173
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 173
| 1
|
'''simple docstring'''
import os
def solution() -> str:
    """Returns the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 83
|
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """
    Returns whether `num` reads the same when its digits are reversed.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83
| 1
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Union[str, Any]=9_9 , lowerCAmelCase_ : Optional[Any]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=5_1_2 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[Any]=None , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_multiple_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = weight_tying
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowercase ( self : Tuple ) -> str:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase ( self : Optional[int] ) -> str:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase ( self : int ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = True
return config, input_ids, input_mask, token_labels
def lowercase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> int:
__lowerCAmelCase = GPTNeoXJapaneseModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple ) -> Dict:
__lowerCAmelCase = True
__lowerCAmelCase = GPTNeoXJapaneseModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> Dict:
__lowerCAmelCase = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = True
__lowerCAmelCase = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
__lowerCAmelCase = output_from_no_past['hidden_states'][0]
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )['hidden_states'][0]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a_ = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : Optional[Any] ) -> int:
__lowerCAmelCase = GPTNeoXJapaneseModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : List[str] ) -> str:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCAmelCase = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase_ )
@slow
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = 'abeja/gpt-neox-japanese-2.7b'
__lowerCAmelCase = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
__lowerCAmelCase = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
__lowerCAmelCase = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = []
for prompt in prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='pt' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=5_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """Output of the 1D UNet: `sample` is the denoised, sample-shaped tensor."""

    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that maps a noisy sample and a timestep to a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
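# Usage sketch (illustrative; the shape and channel values below are assumptions,
# not taken from this file -- extra_in_channels=16 mirrors a dance-diffusion-style
# setup where the 16 Fourier time-feature channels are concatenated to the input):
#
#   import torch
#   model = UNet1DModel(sample_size=256, extra_in_channels=16)
#   sample = torch.randn(1, 2, 256)              # (batch, in_channels, length)
#   output = model(sample, timestep=10).sample   # same shape as `sample`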
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
lowercase__ : Dict = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and save up to `max_images` full-resolution results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.logits.shape)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and image processor for {model_name} to hub""")
        model.push_to_hub(f"""microsoft/{model_name}""")
        image_processor.push_to_hub(f"""microsoft/{model_name}""")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
snake_case : str = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
snake_case : List[Any] = {
'''vinai/phobert-base''': 2_56,
'''vinai/phobert-large''': 2_56,
}
def __lowerCamelCase ( UpperCAmelCase_ : List[str] ):
"""simple docstring"""
a :Union[str, Any] = set()
a :str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a :Optional[int] = char
a :Optional[int] = set(UpperCAmelCase_ )
return pairs
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , **_lowerCamelCase , ):
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , )
a :Optional[Any] = vocab_file
a :Optional[Any] = merges_file
a :Any = {}
a :Any = 0
a :int = 1
a :Union[str, Any] = 2
a :List[Any] = 3
self.add_from_file(_lowerCamelCase )
a :List[str] = {v: k for k, v in self.encoder.items()}
with open(_lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
a :List[str] = merges_handle.read().split('''\n''' )[:-1]
a :Any = [tuple(merge.split()[:-1] ) for merge in merges]
a :str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :str = {}
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a :Union[str, Any] = [self.cls_token_id]
a :Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
a :Optional[int] = [self.sep_token_id]
a :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if token in self.cache:
return self.cache[token]
a :Optional[int] = tuple(_lowerCamelCase )
a :List[str] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
a :Union[str, Any] = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
a :Optional[Any] = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a :Dict = bigram
a :Union[str, Any] = []
a :int = 0
while i < len(_lowerCamelCase ):
try:
a :Optional[Any] = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a :Union[str, Any] = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a :Union[str, Any] = tuple(_lowerCamelCase )
a :int = new_word
if len(_lowerCamelCase ) == 1:
break
else:
a :List[str] = get_pairs(_lowerCamelCase )
a :Union[str, Any] = '''@@ '''.join(_lowerCamelCase )
a :Dict = word[:-4]
a :Any = word
return word
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Union[str, Any] = []
a :str = re.findall(R'''\S+\n?''' , _lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.decoder.get(_lowerCamelCase , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[int] = ''' '''.join(_lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a :Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a :Optional[int] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.merges_file , _lowerCamelCase )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
try:
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(_lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
        lines = f.readlines()
        for line_tmp in lines:
            line = line_tmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)  # assign the next free id to the new token
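# For reference, the dictionary file parsed above holds one "<token> <count>" pair
# per line; the tokens and counts below are purely illustrative, not taken from
# any real vocabulary file:
#
#   Tôi 521462
#   là 304034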
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowerCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , ) ->Union[str, Any]:
if attention_mask is None:
_SCREAMING_SNAKE_CASE = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_SCREAMING_SNAKE_CASE = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class a_ :
'''simple docstring'''
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=32 , A=2 , A=1 , A=0 , A=0.02 , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = initializer_range
def snake_case_( self ) -> Dict:
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case_( self , A , A , A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = 20
_SCREAMING_SNAKE_CASE = model_class_name(A )
_SCREAMING_SNAKE_CASE = model.encode(inputs_dict["""input_ids"""] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , A , A )
_SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
_SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
_SCREAMING_SNAKE_CASE = model.decode(A , A )
_SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def snake_case_( self , A , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = 20
_SCREAMING_SNAKE_CASE = model_class_name(A )
_SCREAMING_SNAKE_CASE = model.encode(inputs_dict["""input_ids"""] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , A , A )
_SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
_SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A , decoder_position_ids=A , )
_SCREAMING_SNAKE_CASE = model.decode(A , A , decoder_attention_mask=A )
_SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = 99
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
_SCREAMING_SNAKE_CASE = input_ids.shape[0]
_SCREAMING_SNAKE_CASE = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_config_and_data()
_SCREAMING_SNAKE_CASE = FlaxBlenderbotSmallForConditionalGeneration(A )
_SCREAMING_SNAKE_CASE = lm_model(input_ids=A )
_SCREAMING_SNAKE_CASE = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , A )
def snake_case_( self ) -> Tuple:
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape)
def snake_case_( self ) -> Tuple:
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class a_ ( snake_case_ , unittest.TestCase , snake_case_ ):
'''simple docstring'''
UpperCamelCase = True
UpperCamelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = FlaxBlenderbotSmallModelTester(self )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A , A , A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A , A )
_SCREAMING_SNAKE_CASE = model_class(A )
@jax.jit
def encode_jitted(A , A=None , **A ):
return model.encode(input_ids=A , attention_mask=A )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_SCREAMING_SNAKE_CASE = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(A , A , A ):
return model.decode(
decoder_input_ids=A , decoder_attention_mask=A , encoder_outputs=A , )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case_( self ) -> str:
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_SCREAMING_SNAKE_CASE = np.ones((1, 1) ) * model.config.eos_token_id
_SCREAMING_SNAKE_CASE = model(A )
self.assertIsNotNone(A )
"""simple docstring"""
import re
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
if match := re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
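# Illustrative outcomes for the validator above (example numbers, not from the original):
#
#   indian_phone_validator("+918827897895")  -> True
#   indian_phone_validator("9876543210")     -> True
#   indian_phone_validator("112233445566")   -> False (mobile numbers start with 7, 8 or 9)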
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    """Config tester asserting that Cvt-specific attributes exist on the config."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) -> Dict:
_a = parent
_a = batch_size
_a = image_size
_a = patch_sizes
_a = patch_stride
_a = patch_padding
_a = is_training
_a = use_labels
_a = num_labels
_a = num_channels
_a = embed_dim
_a = num_heads
_a = stride_kv
_a = depth
_a = cls_token
_a = attention_drop_rate
_a = initializer_range
_a = layer_norm_eps
def _UpperCAmelCase ( self ) -> Tuple:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
# create a random int32 tensor of given shape
_a = ids_tensor([self.batch_size] , self.num_labels )
_a = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ : List[Any] = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ : Any = False
A_ : Optional[Any] = False
A_ : Any = False
A_ : List[str] = False
A_ : List[str] = False
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = TFCvtModelTester(self )
_a = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def _UpperCAmelCase ( self ) -> int:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _UpperCAmelCase ( self ) -> Any:
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def _UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__UpperCAmelCase )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_a = model_class(__UpperCAmelCase )
_a = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_a = outputs.hidden_states
_a = len(self.model_tester.depth )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Any:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFCvtModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    """Load the standard COCO sample image used across the vision test suites."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase ( self ) -> str:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' )
# forward pass
_a = model(**__UpperCAmelCase )
# verify the logits
_a = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_a = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1e-4 ) )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
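# Usage sketch for the template above (column names are illustrative):
#
#   template = Summarization(text_column="article", summary_column="highlights")
#   template.column_mapping  -> {"article": "text", "highlights": "summary"}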
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    model_type = '''audio-spectrogram-transformer'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
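# For reference, hypothetical instantiations (values illustrative, not from this file):
#
#   config = ASTConfig()                                      # library defaults as in the signature above
#   small = ASTConfig(hidden_size=384, num_hidden_layers=6)   # a made-up smaller variant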
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
        '''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
        ),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
        '''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
        ),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
        '''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
        ),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the fast backend normalizer in sync with the options passed in here.
        # `normalizers` is imported from the `tokenizers` package at the top of the file.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Every candidate is padded to max_length so the per-question candidate
        # encodings can be stacked into (batch_size, num_candidates, seq_len).
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
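# Usage sketch (a minimal illustration; the checkpoint name comes from the map
# above, but the example inputs are assumptions):
#
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# candidates = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
# batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
# batch["input_ids"].shape  # (batch_size, num_candidates, seq_len)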
| 301
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        # One Newton step: x <- x - f(x) / f'(x); the function string must use
        # the variable name "x", since it is evaluated with eval().
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision: # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find value of e (root of log(x) - 1 = 0)
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 325
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # A leaf has been reached: return its static score.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
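# Worked example (illustrative values): for leaf scores [3, 5, 2, 9] and
# height log2(4) = 2, the maximizer chooses max(min(3, 5), min(2, 9)) = 3:
# assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3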
| 325
| 1
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Advance to the first process that is not yet finished.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
F"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(F"average waiting time : {mean(waiting_time):.5f}")
print(F"average turn around time : {mean(turn_around_time):.5f}")
| 207
|
def ugly_numbers(n: int) -> int:
    '''simple docstring'''
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_00) = }")
| 207
| 1
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig ( SageMakerConfig ):
__lowerCAmelCase = ComputeEnvironment.AMAZON_SAGEMAKER
__lowerCAmelCase = True
__lowerCAmelCase = 'ml.p3.2xlarge'
__lowerCAmelCase = 'accelerate_sagemaker_execution_role'
__lowerCAmelCase = 'hf-sm'
__lowerCAmelCase = 'us-east-1'
__lowerCAmelCase = 1
__lowerCAmelCase = 'accelerate-sagemaker-1'
__lowerCAmelCase = '1.6'
__lowerCAmelCase = '4.4'
__lowerCAmelCase = 'train.py'
    success_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
    fail_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
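# For reference, a hedged sketch of the dict the successful conversion should
# yield (the exact shape is an assumption inferred from the assertions above):
# {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#  "learning_rate": 5e-5, "max_steps": 50.5}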
| 364
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
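# Worked example matching the call above: a 90 degree arc of a circle with
# radius 10 has length 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708.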
| 165
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
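# Usage sketch (a minimal illustration, not part of this module): with the
# _LazyModule indirection above, importing a symbol only loads the submodule
# that actually defines it:
#
# from transformers import LxmertConfig
# config = LxmertConfig()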
| 202
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_A : int = """CompVis/stable-diffusion-v1-1"""
_A : Any = """CompVis/stable-diffusion-v1-2"""
_A : Optional[int] = """CompVis/stable-diffusion-v1-3"""
_A : Union[str, Any] = """CompVis/stable-diffusion-v1-4"""
class StableDiffusionComparisonPipeline ( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler ,
            safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
def __magic_name__ ( self ):
return {k: getattr(self , _a ) for k in self.config.keys() if not k.startswith("_" )}
def __magic_name__ ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __magic_name__ ( self ):
self.enable_attention_slicing(_a )
@torch.no_grad()
def __magic_name__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def __magic_name__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def __magic_name__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def __magic_name__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def __magic_name__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
lowercase : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
self.to(_a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
lowercase : List[Any] = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowercase : Any = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowercase : str = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowercase : Optional[int] = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
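# Hedged usage sketch: diffusers community pipelines like this one are usually
# loaded through DiffusionPipeline.from_pretrained with a custom_pipeline
# argument; the pipeline name below is an assumption for illustration.
#
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# result = pipe(prompt="an astronaut riding a horse")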
| 202
| 1
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase_ = sys.version_info >= (3, 10)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Optional[int]=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__SCREAMING_SNAKE_CASE )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : int
A : float
A : str
A : bool
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : int = 42
A : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = False
A : bool = True
A : Optional[bool] = None
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Tuple = "titi"
A : List[str] = "toto"
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "titi"
A : Tuple = "toto"
A : Optional[int] = 42
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : BasicEnum = "toto"
def snake_case__ ( self : Any ):
__snake_case : Tuple = BasicEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : MixedTypeEnum = "toto"
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = MixedTypeEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[int] = None
A : Optional[float] = field(default=__UpperCamelCase , metadata={"help": "help message"} )
A : Optional[str] = None
A : Optional[List[str]] = list_field(default=[] )
A : Optional[List[int]] = list_field(default=[] )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : List[int] = list_field(default=[] )
A : List[int] = list_field(default=[1, 2, 3] )
A : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
A : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : List[int] = field()
A : str = field()
A : BasicEnum = field()
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = BasicEnum(self.required_enum )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : int
A : "BasicEnum" = field()
A : "Optional[bool]" = None
A : "str" = field(default="toto" , metadata={"help": "help message"} )
A : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = False
A : bool = True
A : bool | None = None
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : int | None = None
A : float | None = field(default=__UpperCamelCase , metadata={"help": "help message"} )
A : str | None = None
A : list[str] | None = list_field(default=[] )
A : list[int] | None = list_field(default=[] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Dict , _lowerCAmelCase : argparse.ArgumentParser , _lowerCAmelCase : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__snake_case : Optional[Any] = {k: v for k, v in vars(_lowerCAmelCase ).items() if k != """container"""}
__snake_case : Tuple = {k: v for k, v in vars(_lowerCAmelCase ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , _lowerCAmelCase ) and yy.get("""choices""" , _lowerCAmelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](_lowerCAmelCase ) , yy["""type"""](_lowerCAmelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : int = HfArgumentParser(_lowerCAmelCase )
__snake_case : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCAmelCase , required=_lowerCAmelCase )
expected.add_argument("""--bar""" , type=_lowerCAmelCase , required=_lowerCAmelCase )
expected.add_argument("""--baz""" , type=_lowerCAmelCase , required=_lowerCAmelCase )
expected.add_argument("""--flag""" , type=_lowerCAmelCase , default=_lowerCAmelCase , const=_lowerCAmelCase , nargs="""?""" )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Dict = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((__snake_case) , ) : str = parser.parse_args_into_dataclasses(_lowerCAmelCase , look_for_args_file=_lowerCAmelCase )
self.assertFalse(example.flag )
def snake_case__ ( self : List[str] ):
__snake_case : str = HfArgumentParser(_lowerCAmelCase )
__snake_case : Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=_lowerCAmelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=_lowerCAmelCase , help="""help message""" )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCAmelCase , default=_lowerCAmelCase , const=_lowerCAmelCase , nargs="""?""" )
expected.add_argument("""--baz""" , type=_lowerCAmelCase , default=_lowerCAmelCase , const=_lowerCAmelCase , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=_lowerCAmelCase , dest="""baz""" )
expected.add_argument("""--opt""" , type=_lowerCAmelCase , default=_lowerCAmelCase )
__snake_case : List[str] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCAmelCase )
for dataclass_type in dataclass_types:
__snake_case : str = HfArgumentParser(_lowerCAmelCase )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Any = parser.parse_args([] )
self.assertEqual(_lowerCAmelCase , Namespace(foo=_lowerCAmelCase , baz=_lowerCAmelCase , opt=_lowerCAmelCase ) )
__snake_case : List[Any] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(_lowerCAmelCase , Namespace(foo=_lowerCAmelCase , baz=_lowerCAmelCase , opt=_lowerCAmelCase ) )
__snake_case : Optional[int] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(_lowerCAmelCase , Namespace(foo=_lowerCAmelCase , baz=_lowerCAmelCase , opt=_lowerCAmelCase ) )
__snake_case : int = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(_lowerCAmelCase , Namespace(foo=_lowerCAmelCase , baz=_lowerCAmelCase , opt=_lowerCAmelCase ) )
__snake_case : Tuple = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(_lowerCAmelCase , Namespace(foo=_lowerCAmelCase , baz=_lowerCAmelCase , opt=_lowerCAmelCase ) )
def snake_case__ ( self : List[str] ):
__snake_case : Optional[Any] = HfArgumentParser(_lowerCAmelCase )
__snake_case : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : str = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
__snake_case : Optional[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__snake_case : Tuple = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
__snake_case : str = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__snake_case : List[str] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
__snake_case : List[Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__ ( self : Dict ):
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Literal["titi", "toto", 42] = "toto"
__snake_case : Any = HfArgumentParser(_lowerCAmelCase )
__snake_case : int = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
__snake_case : List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
__snake_case : Tuple = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def snake_case__ ( self : Dict ):
__snake_case : Optional[int] = HfArgumentParser(_lowerCAmelCase )
__snake_case : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_lowerCAmelCase )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_lowerCAmelCase )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_lowerCAmelCase )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_lowerCAmelCase )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = parser.parse_args([] )
self.assertEqual(
_lowerCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
__snake_case : Tuple = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(_lowerCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def snake_case__ ( self : str ):
__snake_case : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=_lowerCAmelCase , type=_lowerCAmelCase )
expected.add_argument("""--bar""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""help message""" )
expected.add_argument("""--baz""" , default=_lowerCAmelCase , type=_lowerCAmelCase )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_lowerCAmelCase )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_lowerCAmelCase )
__snake_case : Optional[Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCAmelCase )
for dataclass_type in dataclass_types:
__snake_case : Optional[Any] = HfArgumentParser(_lowerCAmelCase )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCAmelCase , Namespace(foo=_lowerCAmelCase , bar=_lowerCAmelCase , baz=_lowerCAmelCase , ces=[] , des=[] ) )
__snake_case : List[Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(_lowerCAmelCase , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = HfArgumentParser(_lowerCAmelCase )
__snake_case : Any = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=_lowerCAmelCase , required=_lowerCAmelCase )
expected.add_argument("""--required_str""" , type=_lowerCAmelCase , required=_lowerCAmelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_lowerCAmelCase , )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : str ):
__snake_case : Optional[Any] = HfArgumentParser(_lowerCAmelCase )
__snake_case : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCAmelCase , required=_lowerCAmelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_lowerCAmelCase , )
expected.add_argument("""--opt""" , type=_lowerCAmelCase , default=_lowerCAmelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=_lowerCAmelCase , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_lowerCAmelCase )
self.argparsersEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
__snake_case : str = HfArgumentParser(_lowerCAmelCase )
__snake_case : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
__snake_case : Tuple = parser.parse_dict(_lowerCAmelCase )[0]
__snake_case : int = BasicExample(**_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = HfArgumentParser(_lowerCAmelCase )
__snake_case : str = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(_lowerCAmelCase , parser.parse_dict , _lowerCAmelCase , allow_extra_keys=_lowerCAmelCase )
def snake_case__ ( self : Any ):
__snake_case : Optional[int] = HfArgumentParser(_lowerCAmelCase )
__snake_case : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = os.path.join(_lowerCAmelCase , """temp_json""" )
os.mkdir(_lowerCAmelCase )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : List[str] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
__snake_case : int = BasicExample(**_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[Any] = HfArgumentParser(_lowerCAmelCase )
__snake_case : Any = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : int = os.path.join(_lowerCAmelCase , """temp_yaml""" )
os.mkdir(_lowerCAmelCase )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : str = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
__snake_case : Tuple = BasicExample(**_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = HfArgumentParser(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
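# Minimal usage sketch of the API under test (the dataclass is hypothetical):
#
# @dataclass
# class TrainConfig:
#     learning_rate: float = 5e-5
#     epochs: int = 3
#
# parser = HfArgumentParser(TrainConfig)
# (config,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4"])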
| 20
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Dict , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = {}
__snake_case : int = {}
if prompt is not None:
__snake_case : Dict = prompt
if generate_kwargs is not None:
__snake_case : List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case : Optional[int] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__snake_case : Any = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : Union[str, Any] ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
__snake_case : Tuple = self.model.config.model_type
if model_type == "git":
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Any = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
__snake_case : Tuple = [self.tokenizer.cls_token_id] + input_ids
__snake_case : int = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__snake_case : Dict = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case : int = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Optional[Any] = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case : int = None
return model_inputs
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__snake_case : List[Any] = None
if generate_kwargs is None:
__snake_case : Dict = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case : Dict = model_inputs.pop(self.model.main_input_name )
__snake_case : Optional[int] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str ):
__snake_case : Union[str, Any] = []
for output_ids in model_outputs:
__snake_case : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
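# Hedged usage sketch of the pipeline defined above (the model name is only an
# illustrative choice):
#
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="microsoft/git-base")
# captioner("photo.jpg")  # -> [{"generated_text": "..."}]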
| 20
| 1
|
'''simple docstring'''
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """simple docstring"""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            f'But got {num_items} weights and {len(val)} values'
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                """All weights must be integers but got weight of """
                f'type {type(wt[i])} at index {i}'
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """simple docstring"""
    # If the optimal value at (i, j) equals the one at (i - 1, j), item i is not
    # part of this optimal subset; otherwise include it and reduce the capacity.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('''optimal_value = ''', optimal_solution)
    print('''An optimal subset corresponding to the optimal value''', optimal_subset)
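# Note on the two approaches above: knapsack() fills the full (n + 1) x (w + 1)
# table bottom-up in O(n * w) time, while mf_knapsack() memoizes top-down in the
# global table f and only computes the states it actually visits.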
| 254
|
'''simple docstring'''
_UpperCamelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_UpperCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_UpperCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 254
| 1
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_a : int = logging.getLogger(__name__)
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[int] ="""summarization"""
a : List[str] =["""loss"""]
a : List[Any] =ROUGE_KEYS
a : Tuple ="""rouge2"""
def __init__( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
__lowerCAmelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(__SCREAMING_SNAKE_CASE,num_labels=__SCREAMING_SNAKE_CASE,mode=self.mode,**__SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model,"""summarization""" )
save_git_info(self.hparams.output_dir )
__lowerCAmelCase = Path(self.output_dir ) / """metrics.json"""
__lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams,self.hparams_save_path )
__lowerCAmelCase = 0
__lowerCAmelCase = defaultdict(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.config.model_type
__lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__lowerCAmelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__lowerCAmelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__lowerCAmelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__lowerCAmelCase = get_git_info()["""repo_sha"""]
__lowerCAmelCase = hparams.num_workers
__lowerCAmelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__lowerCAmelCase = self.decoder_start_token_id
__lowerCAmelCase = (
SeqaSeqDataset if hasattr(self.tokenizer,"""prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__lowerCAmelCase = False
__lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__lowerCAmelCase = self.hparams.eval_max_gen_length
else:
__lowerCAmelCase = self.model.config.max_length
__lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(__SCREAMING_SNAKE_CASE,Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()},Path(self.output_dir ) / """tok_batch.json""" )
__lowerCAmelCase = True
return readable_batch
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.model(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.tokenizer.batch_decode(
__SCREAMING_SNAKE_CASE,skip_special_tokens=__SCREAMING_SNAKE_CASE,clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
return lmap(str.strip,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.tokenizer.pad_token_id
__lowerCAmelCase , __lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""]
__lowerCAmelCase = batch["""labels"""]
if isinstance(self.model,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = self.model._shift_right(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = shift_tokens_right(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__lowerCAmelCase = decoder_input_ids
self.save_readable_batch(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,decoder_input_ids=__SCREAMING_SNAKE_CASE,use_cache=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=__SCREAMING_SNAKE_CASE )
assert lm_logits.shape[-1] == self.vocab_size
__lowerCAmelCase = ce_loss_fct(lm_logits.view(-1,lm_logits.shape[-1] ),tgt_ids.view(-1 ) )
else:
__lowerCAmelCase = nn.functional.log_softmax(__SCREAMING_SNAKE_CASE,dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = label_smoothed_nll_loss(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,self.hparams.label_smoothing,ignore_index=__SCREAMING_SNAKE_CASE )
return (loss,)
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self._step(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = dict(zip(self.loss_names,__SCREAMING_SNAKE_CASE ) )
# tokens per batch
__lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
__lowerCAmelCase = batch["""input_ids"""].shape[0]
__lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum()
__lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self._generative_step(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="val" ):
'''simple docstring'''
self.step_count += 1
__lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__lowerCAmelCase = losses["""loss"""]
__lowerCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
__lowerCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__lowerCAmelCase = torch.tensor(__SCREAMING_SNAKE_CASE ).type_as(__SCREAMING_SNAKE_CASE )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
__lowerCAmelCase = self.step_count
self.metrics[prefix].append(__SCREAMING_SNAKE_CASE ) # callback writes this to self.metrics_save_path
__lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return calculate_rouge(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__lowerCAmelCase = self.model.generate(
batch["""input_ids"""],attention_mask=batch["""attention_mask"""],use_cache=__SCREAMING_SNAKE_CASE,decoder_start_token_id=self.decoder_start_token_id,num_beams=self.eval_beams,max_length=self.eval_max_length,)
__lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
__lowerCAmelCase = self.ids_to_clean_text(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] )
__lowerCAmelCase = self._step(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = dict(zip(self.loss_names,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = self.calc_generative_metrics(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.mean(lmap(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
base_metrics.update(gen_time=__SCREAMING_SNAKE_CASE,gen_len=__SCREAMING_SNAKE_CASE,preds=__SCREAMING_SNAKE_CASE,target=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
return base_metrics
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self._generative_step(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.validation_epoch_end(__SCREAMING_SNAKE_CASE,prefix="""test""" )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.n_obs[type_path]
__lowerCAmelCase = self.target_lens[type_path]
__lowerCAmelCase = self.dataset_class(
self.tokenizer,type_path=__SCREAMING_SNAKE_CASE,n_obs=__SCREAMING_SNAKE_CASE,max_target_length=__SCREAMING_SNAKE_CASE,**self.dataset_kwargs,)
return dataset
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
__lowerCAmelCase = self.get_dataset(__SCREAMING_SNAKE_CASE )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__lowerCAmelCase = dataset.make_sortish_sampler(__SCREAMING_SNAKE_CASE,distributed=self.hparams.gpus > 1 )
return DataLoader(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,collate_fn=dataset.collate_fn,shuffle=__SCREAMING_SNAKE_CASE,num_workers=self.num_workers,sampler=__SCREAMING_SNAKE_CASE,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__lowerCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch,distributed=self.hparams.gpus > 1 )
return DataLoader(
__SCREAMING_SNAKE_CASE,batch_sampler=__SCREAMING_SNAKE_CASE,collate_fn=dataset.collate_fn,num_workers=self.num_workers,)
else:
return DataLoader(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,collate_fn=dataset.collate_fn,shuffle=__SCREAMING_SNAKE_CASE,num_workers=self.num_workers,sampler=__SCREAMING_SNAKE_CASE,)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_dataloader("""train""",batch_size=self.hparams.train_batch_size,shuffle=__SCREAMING_SNAKE_CASE )
return dataloader
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.get_dataloader("""val""",batch_size=self.hparams.eval_batch_size )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.get_dataloader("""test""",batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
add_generic_args(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_source_length""",default=10_24,type=__SCREAMING_SNAKE_CASE,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
),)
parser.add_argument(
"""--max_target_length""",default=56,type=__SCREAMING_SNAKE_CASE,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
),)
parser.add_argument(
"""--val_max_target_length""",default=1_42,type=__SCREAMING_SNAKE_CASE,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
),)
parser.add_argument(
"""--test_max_target_length""",default=1_42,type=__SCREAMING_SNAKE_CASE,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
),)
parser.add_argument("""--freeze_encoder""",action="""store_true""" )
parser.add_argument("""--freeze_embeds""",action="""store_true""" )
parser.add_argument("""--sortish_sampler""",action="""store_true""",default=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--overwrite_output_dir""",action="""store_true""",default=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--max_tokens_per_batch""",type=__SCREAMING_SNAKE_CASE,default=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--logger_name""",type=__SCREAMING_SNAKE_CASE,choices=["""default""", """wandb""", """wandb_shared"""],default="""default""" )
parser.add_argument("""--n_train""",type=__SCREAMING_SNAKE_CASE,default=-1,required=__SCREAMING_SNAKE_CASE,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""",type=__SCREAMING_SNAKE_CASE,default=5_00,required=__SCREAMING_SNAKE_CASE,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""",type=__SCREAMING_SNAKE_CASE,default=-1,required=__SCREAMING_SNAKE_CASE,help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""",type=__SCREAMING_SNAKE_CASE,default="""summarization""",required=__SCREAMING_SNAKE_CASE,help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""",type=__SCREAMING_SNAKE_CASE,default=0.0,required=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--src_lang""",type=__SCREAMING_SNAKE_CASE,default="""""",required=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--tgt_lang""",type=__SCREAMING_SNAKE_CASE,default="""""",required=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_beams""",type=__SCREAMING_SNAKE_CASE,default=__SCREAMING_SNAKE_CASE,required=__SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--val_metric""",type=__SCREAMING_SNAKE_CASE,default=__SCREAMING_SNAKE_CASE,required=__SCREAMING_SNAKE_CASE,choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""",type=__SCREAMING_SNAKE_CASE,default=__SCREAMING_SNAKE_CASE,help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""",type=__SCREAMING_SNAKE_CASE,default=1,required=__SCREAMING_SNAKE_CASE,help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""",type=__SCREAMING_SNAKE_CASE,default=-1,required=__SCREAMING_SNAKE_CASE,help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
),)
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
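# Example CLI invocation (a sketch only; the data/output-dir flags come from the
# generic argument groups added above, and the paths here are purely illustrative):
#   python finetune.py --data_dir ./wmt_en_ro --output_dir ./out --task translation \
#       --src_lang en --tgt_lang ro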
| 46
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos to the end."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
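# Usage sketch (the vocab path is illustrative; any Pegasus SentencePiece model works):
#   tok = PegasusTokenizer("spiece.model")
#   token_ids = tok("Hello world").input_ids  # ends with the eos token id (1)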
| 46
| 1
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    # a tiny dataset: the first two rows are near-duplicates, the third is distinct
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 222
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    # an image of cute cats used across the vision integration tests
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 222
| 1
|
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a separate process and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). This is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
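# Usage sketch (hypothetical program string):
#   res = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#   res["passed"]  # -> True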
| 321
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the first convolution kernel
        :param size_p1: pooling size of the first pooling layer
        :param bp_num1: unit number of the flattened layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save all trained parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # load a previously saved model back into a CNN instance
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
    def _expand(self, data):
        # expand a list of 2-D matrices into one flat vector
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a single matrix into a one-row matrix
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # upsample the pooled gradient back to the feature-map size
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the convolved and pooled feature maps for one input sample
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
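# Usage sketch (shapes are illustrative): for 28x28 inputs, two 3x3 kernels with
# step 1 give 26x26 feature maps; 2x2 pooling gives 13x13, so the flattened layer
# has 2 * 13 * 13 = 338 units:
#   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=338, bp_num2=30, bp_num3=5)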
| 321
| 1
|
def ugly_numbers(n: int) -> int:
    """
    Return the n-th ugly number, i.e. the n-th positive integer whose only prime
    factors are 2, 3 and 5 (the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    >>> ugly_numbers(20)
    36
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 73
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2Former config from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 73
| 1
|
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
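# For a unit edge these evaluate to roughly 20.6457 (surface area) and 7.6631 (volume).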
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours
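# The input file is expected to contain one edge per line, whitespace-separated:
# "node_a node_b distance"; generate_neighbours builds an adjacency dict from it.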
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 213
| 0
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left:right] for target; returns its index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 270
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 270
| 1
|
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # rightmost occurrence of `char` in the pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost mismatching index in the current text window, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
__a: List[str] = """ABAABA"""
__a: Union[str, Any] = """AB"""
__a: int = BoyerMooreSearch(text, pattern)
__a: Optional[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
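# A quick illustrative trace (a sketch added for clarity, not part of the original
# module): with text="ABAABA" and pattern="AB", the window matches at i=0 and i=3,
# while the windows at i=1, i=2 and i=4 fail on pattern[1] != text[i + 1].
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]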
| 214
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
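# Minimal usage sketch (added for illustration; it assumes the same public checkpoint the
# slow tests above rely on, so it needs network access):
#
#   from transformers import ProphetNetTokenizer
#   tok = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
#   ids = tok("A long paragraph for summarization.").input_ids
#   assert ids[-1] == 102  # [SEP] is appended, as test_prepare_batch expects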
| 214
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # The two flag names below were obfuscated in the source; `False` matches the upstream diffusers test.
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,  # boolean flags were obfuscated in the source; restored from the upstream test
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
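# Usage sketch (illustrative only, mirroring the integration test above; it requires the
# `harmonai/maestro-150k` checkpoint and, realistically, a GPU):
#
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   audios = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios
#   print(audios.shape)  # (batch, channels, sample_size)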
| 5
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A training module for NER; see BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False):
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCAmelCase : Tuple = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase : List[str] = parser.parse_args()
__UpperCAmelCase : Optional[int] = NERTransformer(args)
__UpperCAmelCase : Tuple = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__UpperCAmelCase : Any = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
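# Example invocation (a sketch; the script name and the `--data_dir`, `--model_name_or_path`,
# `--output_dir` and `--do_train` flags come from the generic args added by
# `add_generic_args`/`BaseTransformer` and are assumed here):
#
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./out --task_type NER --max_seq_length 128 --gpus 1 --do_train --do_predict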
| 111
| 0
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra: runs Dijkstra from `source` on `graph_forward` and
    from `destination` on `graph_backward` simultaneously, stopping once the two
    frontiers can no longer improve the best meeting distance.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
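# Worked example on the module-level graphs above (a sketch for clarity): the best
# E -> F route is E -> G -> F with cost 2 + 1 = 3, which the forward and backward
# searches agree on once their frontiers meet.
#
#   print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3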
| 358
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
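# Inference sketch (illustrative; it assumes the `facebook/regnet-y-040` checkpoint named in
# the docstrings above and the usual image-classification preprocessing):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL image you supply
#   logits = model(**inputs).logits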
| 322
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
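# Translation sketch (illustrative; the checkpoint and language codes come from the tests above):
#
#   from transformers import MBartForConditionalGeneration, MBartTokenizer
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#   generated = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro").generate(**batch)
#   print(tok.batch_decode(generated, skip_special_tokens=True))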
| 312
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find the minimum cost of a path from the top-left to the bottom-right of
    `matrix`, moving only right or down, by accumulating costs in place.

    >>> min_path_sum([[2, 1], [3, 1]])
    4
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
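# Worked example (a sketch): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# right/down path is 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7.
#
#   print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7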
| 312
| 1
|
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law F = k * |q1 * q2| / d^2 and solve for whichever of the
    four quantities was passed in as 0.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
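# Worked example (a sketch): two 1 C charges 1 m apart experience
# F = 8.988e9 * |1 * 1| / 1**2 = 8.988e9 N.
#
#   print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))  # {'force': 8988000000.0}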
| 167
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"})

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 167
| 1
|
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs for the first arrangement whose total
    number of discs exceeds ``min_total``, using the Pell-equation recurrence
    for P(two blue) = 1/2 (Project Euler problem 100).
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'{solution() = }')
| 10
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Find print statements that are not inside comments or docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 201
| 0
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b] (1-indexed), with lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-indexed), applying pending lazy updates."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 62
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_conversion_reversion(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # saving a model converted to BetterTransformer should raise until it is reversed
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 62
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
lowercase : Tuple = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 20
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 20
| 1
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 356
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 167
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 46
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """
    >>> encode("myname")
    [13, 25, 14, 1, 13, 5]
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """
    >>> decode([13, 25, 14, 1, 13, 5])
    'myname'
    """
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 46
| 1
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Any=2 , __lowerCamelCase: Dict=3 , __lowerCamelCase: Optional[Any]=16 , __lowerCamelCase: List[str]=[32, 64, 1_28] , __lowerCamelCase: str=[1, 2, 1] , __lowerCamelCase: Optional[Any]=[2, 2, 4] , __lowerCamelCase: Tuple=2 , __lowerCamelCase: List[Any]=2.0 , __lowerCamelCase: Tuple=True , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: str=0.0 , __lowerCamelCase: str=0.1 , __lowerCamelCase: str="gelu" , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: Tuple=True , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: Optional[Any]=1e-5 , __lowerCamelCase: str=True , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: int=True , __lowerCamelCase: List[str]=10 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=["stage1", "stage2"] , __lowerCamelCase: Union[str, Any]=[1, 2] , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Tuple = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : Dict = embed_dim
__UpperCAmelCase : int = hidden_sizes
__UpperCAmelCase : Union[str, Any] = depths
__UpperCAmelCase : List[Any] = num_heads
__UpperCAmelCase : Optional[Any] = window_size
__UpperCAmelCase : Optional[int] = mlp_ratio
__UpperCAmelCase : Union[str, Any] = qkv_bias
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Any = drop_path_rate
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = use_absolute_embeddings
__UpperCAmelCase : List[str] = patch_norm
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Tuple = encoder_stride
__UpperCAmelCase : Tuple = out_features
__UpperCAmelCase : List[Any] = out_indices
def _lowerCamelCase ( self: Tuple ) -> str:
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[Any] = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: int ) -> Dict:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : str = FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : int = model(UpperCamelCase__ )
__UpperCAmelCase : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[Any] = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[Any] = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : Tuple = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ) -> int:
__UpperCAmelCase : Optional[int] = FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : int = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : str = FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : Dict = self.type_sequence_label_size
__UpperCAmelCase : Optional[Any] = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : List[str] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self: Dict ) -> Optional[int]:
__UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __a , __a , unittest.TestCase ):
lowerCamelCase__: Any = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: int = False
lowerCamelCase__: str = False
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : Tuple = FocalNetModelTester(self )
__UpperCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 , has_text_modality=UpperCamelCase__ )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: str ) -> int:
return
def _lowerCamelCase ( self: Any ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def _lowerCamelCase ( self: Dict ) -> Optional[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def _lowerCamelCase ( self: Dict ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowerCamelCase ( self: str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase__ )
__UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Any , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] ) -> Dict:
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase : List[Any] = outputs.hidden_states
__UpperCAmelCase : List[str] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# FocalNet has a different seq_length
__UpperCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase : str = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = reshaped_hidden_states[0].shape
__UpperCAmelCase : Any = (
reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Tuple = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = FocalNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _lowerCamelCase ( self: int ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: str ) -> Dict:
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> str:
__UpperCAmelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCamelCase__ )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__UpperCAmelCase : List[str] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
__UpperCAmelCase : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
__UpperCAmelCase : Any = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _snake_case ( __a , unittest.TestCase ):
lowerCamelCase__: List[str] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__: Optional[Any] = FocalNetConfig
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
__UpperCAmelCase : List[Any] = FocalNetModelTester(self )
| 369
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path)
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
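# Example invocation (script name and output path are illustrative; the mmseg checkpoint
# itself is downloaded automatically through torch.hub):
#   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny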
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""Construct a CANINE tokenizer (i.e. a character splitter): each character becomes its Unicode codepoint."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS),
        pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            model_max_length=model_max_length, **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write.
        return ()
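# Minimal usage sketch for the character-level scheme above: ids are raw Unicode
# codepoints, with [CLS]/[SEP] drawn from the Private Use Area, so no vocab file is needed.
#   tokenizer = CanineTokenizer()
#   tokenizer("hi")["input_ids"]  # -> [57344, 104, 105, 57345], i.e. [CLS] h i [SEP]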
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))

pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
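# Example run (script and model-directory names are hypothetical; a `best_model.pt`
# produced by an Intel Neural Compressor run is picked up automatically when present):
#   python text2images.py -m ./stable-diffusion-distilled -c "robotic cat with wings" -n 4 -s 42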
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot for text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate to a value that is not even reached by the
                # input. In that case we don't want to truncate; there is no better way to catch this exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
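# Hedged usage sketch: the "zero-shot-classification" pipeline task wires this class up;
# the model name below is just a common example.
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this movie", candidate_labels=["positive", "negative"], multi_label=False)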
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
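# Minimal usage sketch (the hub checkpoint ships the required vocab.txt / emoji.json):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   tokenizer.tokenize("こんにちは")  # characters outside the vocab fall back to <|byteN|> tokens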
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity after a polarizer at `angle` degrees (Malus's law: I = I0 * cos^2(theta))."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
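# Worked example: a polarizer at 60 degrees transmits cos^2(60°) = 0.25 of the light,
# so malus_law(100, 60) ≈ 25.0 (up to floating-point rounding).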
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
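# Import-cost note (sketch): with the _LazyModule registration above,
# `import transformers.models.roformer` stays cheap; a heavy backend is only imported
# on first attribute access, e.g.
#   from transformers.models.roformer import RoFormerModel  # pulls in torch lazily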
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
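# Running these tests from a transformers dev checkout (path is illustrative):
#   python -m pytest tests/models/vivit/test_image_processing_vivit.py -q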
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias'''))
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight'''))
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"""))
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
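# Note on the four helpers above: mmsegmentation stores Swin's patch-merging ("unfold")
# weights with the four spatial sub-patches in a different order than the HF Swin
# implementation, so downsample reduction/norm tensors are permuted via the [0, 2, 1, 3]
# index before loading, e.g. hf_tensor = reverse_correct_unfold_reduction_order(mmseg_tensor).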
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
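# Example invocation (script name is illustrative; the mmseg checkpoint downloads via torch.hub):
#   python convert_upernet_swin_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny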
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings for a 1-D array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase)
lowerCAmelCase = nn.silu(__lowerCAmelCase)
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase)
return temb
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : float = 1
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
return get_sinusoidal_embeddings(
__lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
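

# Minimal usage sketch for the helper above (shapes are illustrative):
_timesteps = jnp.arange(4)                                      # four diffusion timesteps
_emb = get_sinusoidal_embeddings(_timesteps, embedding_dim=32)
assert _emb.shape == (4, 32)                                    # one 32-dim row per timestep
# Each row holds sines in the first half and cosines in the second (or flipped
# when flip_sin_to_cos=True), at geometrically spaced frequencies.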
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to instantiate the command from the parsed CLI arguments.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)


IMPORT_ERROR_MESSAGE = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command's sub-parser and its arguments on the CLI parser.
        """
        train_parser = parser.add_parser(
            'convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.', )
        train_parser.add_argument('--model_type', type=str, required=True, help='Model\'s type.')
        train_parser.add_argument(
            '--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument(
            '--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
        train_parser.add_argument(
            '--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.', )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str,
                 finetuning_task_name: str, *args):
        self._logger = logging.get_logger('transformers-cli/converting')

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ''
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ''
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file)
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, '
                'xlnet, xlm, lxmert, rembert]')
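

# A minimal invocation sketch (paths are hypothetical, shown only for illustration);
# this builds the command exactly as the CLI's `set_defaults(func=...)` hook would:
#
#     args = Namespace(
#         model_type="bert",
#         tf_checkpoint="/path/to/bert_model.ckpt",
#         pytorch_dump_output="/path/to/pytorch_dump",
#         config="/path/to/bert_config.json",
#         finetuning_task_name=None,
#     )
#     convert_command_factory(args).run()  # converts the TF checkpoint to a PyTorch one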
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
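

# Usage sketch: the product over conv_stride is the encoder's total downsampling
# factor, i.e. how many raw audio samples map to one output frame.
_cfg = SEWDConfig()
print(_cfg.inputs_to_logits_ratio)  # 320 with the default strides (5*2*1*2*1*2*1*2*1*2*1*2*1)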
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
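
# Behavior sketch: importing this package stays cheap because `_LazyModule`
# replaces the module object in sys.modules; the heavy torch/vision submodules
# are only imported on first attribute access, e.g.
#     from transformers.models.conditional_detr import ConditionalDetrConfig
# pulls in only the configuration module, while the modeling code loads on demand.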
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
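
# A minimal invocation sketch (paths are hypothetical, for illustration only):
#
#     python convert_wavlm_original_checkpoint.py \
#         --checkpoint_path /path/to/WavLM-Base.pt \
#         --pytorch_dump_folder_path /path/to/wavlm-base-hf
#
# or, from Python, call the converter directly:
#     convert_wavlm_checkpoint("/path/to/WavLM-Base.pt", "/path/to/wavlm-base-hf")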
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
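
# Quick sanity check of the regex above on a synthetic docstring (illustrative):
_sample = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
assert _re_checkpoint.findall(_sample) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]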
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
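
# Usage sketch (illustrative): with the defaults a ResNet backbone config is built
# automatically, and the attribute_map aliases resolve to the DETR-style names:
#     cfg = DetaConfig()
#     cfg.backbone_config.model_type  -> "resnet"
#     cfg.hidden_size                 -> 256 (alias of cfg.d_model)
#     cfg.num_attention_heads         -> 8   (alias of cfg.encoder_attention_heads)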
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """
    Count the `length`-digit numbers whose outermost `remaining_length` digit
    pairs are still free, given the carry `remainder` from the pairs fixed so far.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    """Count the reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
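
# Brute-force cross-check for small lengths (illustrative; O(10**n), keep n small).
# A number n is reversible when n + reverse(n) has only odd digits; n % 10 != 0
# rules out reversals with a leading zero.
def _is_reversible(n: int) -> bool:
    r = int(str(n)[::-1])
    return n % 10 != 0 and all(int(d) % 2 == 1 for d in str(n + r))

assert solution(3) == sum(_is_reversible(n) for n in range(1, 10 ** 3)) == 120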
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
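
    # What PolynomialFeatures(degree=4) actually feeds the linear model: the
    # single column [x] is expanded to [1, x, x^2, x^3, x^4], e.g. (illustrative)
    #     poly_reg.fit_transform([[5.5]]) -> [[1., 5.5, 30.25, 166.375, 915.0625]]
    # so the "polynomial" fit is just ordinary least squares on these powers.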
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = 'xlm'
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
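
# Sketch: the ONNX config advertises dynamic axes for export. Instantiating an
# OnnxConfig subclass with a model config is assumed to follow the usual
# `XLMOnnxConfig(XLMConfig())` pattern (task defaults to "default"):
#     onnx_config = XLMOnnxConfig(XLMConfig())
#     list(onnx_config.inputs.keys())  -> ['input_ids', 'attention_mask', 'token_type_ids']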
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''
    Helper function parsing the command line options.
    '''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ))

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
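
# Invocation sketch (hypothetical script name and flags, for illustration):
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --lr 3e-5
#
# The launcher imports my_training_script as a module, rewrites sys.argv to
# append `--tpu_num_cores 8`, then spawns `_mp_fn` on each TPU core via xmp.spawn.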
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")  # path intentionally left as a placeholder

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
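
# Tiny numeric demo of the two pooling helpers (no image file needed):
_demo = np.arange(1, 17).reshape(4, 4)  # [[1..4], [5..8], [9..12], [13..16]]
assert maxpooling(_demo, size=2, stride=2).tolist() == [[6.0, 8.0], [14.0, 16.0]]
assert avgpooling(_demo, size=2, stride=2).tolist() == [[3.0, 5.0], [11.0, 13.0]]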
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a SwinConfig from a timm model name such as swin_tiny_patch4_window7_224."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Map a timm Swin state-dict key to the corresponding transformers key."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # timm fuses query/key/value into one matrix; split it back out
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swin_name',
        default='swin_tiny_patch4_window7_224',
        type=str,
        help='Name of the Swin timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """
    Bead ("gravity") sort: repeatedly let larger values fall past smaller
    neighbours, like beads sliding down vertical rods.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
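
    # Randomized cross-check against the built-in sort (illustrative; bead_sort
    # mutates its argument, so pass a copy):
    import random

    _data = [random.randrange(100) for _ in range(20)]
    assert bead_sort(list(_data)) == sorted(_data)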
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
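
    # Shape sanity (illustrative): each sample is a look_back-long window of one
    # feature, and each target is the next forward_days values flattened:
    #     x_train.shape == (n_samples, look_back, 1)
    #     y_train.shape == (n_samples, forward_days)
    #     pred.shape    == (len(x_test), forward_days)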
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_A = """bart"""
_A = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""")
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""").to("""cuda:0""")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("""yjernite/bart_eli5""").to("""cuda:0""")
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""")
        sas_model.load_state_dict(save_dict["""model"""])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="""t5-small""", from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""", device="""cuda:0""")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="""wiki_snippets""", name="""wiki40b_en_100_0""")["""train"""]
        wiki40b_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""", dtype="""float32""", mode="""r""",
            shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("""eli5""", name="""LFQA_reddit""")
    eli5_train = eli5["""train_eli5"""]
    eli5_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""", dtype="""float32""", mode="""r""", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="wiki40b" , __UpperCAmelCase="dense" , __UpperCAmelCase=10 ) -> List[str]:
if source == "none":
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=__UpperCAmelCase , )
lowerCAmelCase__ : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
lowerCAmelCase__ : Optional[Any] = """question: {} context: {}""".format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    })
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024,
            device="""cuda:0""", )[0]
    return (answer, support_list)  # `support_list` is the module-level result set built below at click time
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_A = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_A = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_A = st.sidebar.checkbox("""Demo options""")
if demo_options:
_A = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_A = action_list.index(action_st)
_A = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_A = show_type == """Show full text of passages"""
else:
_A = 3
_A = True
_A = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_A = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_A = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_A = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_A = """wiki40b"""
_A = """dense"""
_A = """beam"""
_A = 2
_A = 6_4
_A = 2_5_6
_A = None
_A = None
_A = st.sidebar.checkbox("""Generation options""")
if generate_options:
_A = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_A = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_A = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_A = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_A = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_A = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_A = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_A = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    """What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_A , _A = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_A , _A = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_A = support_list[:1_0]
_A = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_A , _A = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_A , _A = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
        for i, res in enumerate(support_list):
            wiki_url = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = """[{}]({})""".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
                    ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
                )
            st.markdown(
                """{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}""".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
        )
        answers_st = [
            """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
            for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
            if i == 0 or sc > 2
        ]
        st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_A = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 242
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
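
A minimal usage sketch for the config class above; the small layer counts are illustrative overrides, not the released checkpoint's hyperparameters.

config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
print(config.hidden_size)  # 64, resolved through attribute_map to n_embd
print(config.action_dim)   # 6 (default)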
| 38
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    # fit_generator was removed in recent TF releases; fit accepts generators directly
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # the sigmoid head outputs a probability, so threshold it rather than
    # comparing against exact 0/1 values
    if result[0][0] >= 0.5:
        prediction = '''Abnormality detected'''
    else:
        prediction = '''Normal'''
| 38
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
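
A hedged round-trip example for the fast tokenizer above; it assumes the bigscience/bloom-560m files are reachable on the Hub.

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
ids = tok("Hello world").input_ids
print(tok.decode(ids))  # "Hello world"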
| 302
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
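
A minimal sketch exercising both classes above: build a small config, then read the dynamic-axis mapping off the ONNX config. No export is performed, and the "default" task is an assumption.

albert_config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
onnx_config = AlbertOnnxConfig(albert_config, task="default")
print(onnx_config.inputs)  # OrderedDict mapping input names to {0: "batch", 1: "sequence"}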
| 302
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 338
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
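
For reference, a small sketch of what the _LazyModule indirection buys: importing the package module is cheap, and the heavy tokenizer code only loads on first attribute access. The module path is assumed from the file's location in transformers.

import transformers.models.wav2vec2_phoneme as wav2vec2_phoneme  # fast: nothing heavy imported yet
tokenizer_cls = wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizer      # triggers the real import lazily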
| 338
| 1
|
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 43
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 169
| 0
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
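
Quick sanity checks for the fixed function above: transmitted intensity is maximal at 0 degrees, a quarter at 60 degrees, and (numerically) zero at 90.

print(malus_law(100.0, 0))             # 100.0
print(round(malus_law(100.0, 60), 6))  # 25.0
print(round(malus_law(100.0, 90), 6))  # 0.0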
| 364
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
| 0
|
def dodecahedron_surface_area(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
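
For a quick numeric check of the two closed-form formulas (unit edge, rounded):

print(round(dodecahedron_surface_area(1), 4))  # 20.6457 == 3 * sqrt(25 + 10 * sqrt(5))
print(round(dodecahedron_volume(1), 4))        # 7.6631 == (15 + 7 * sqrt(5)) / 4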
| 92
|
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
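
This is the classic totient-sieve solution to Project Euler 72: the answer is the number of reduced proper fractions n/d with d <= limit, i.e. the sum of phi(d) for 2 <= d <= limit. The problem statement's worked example gives a quick check, assuming the fixed names above:

assert solution(8) == 21  # 21 reduced proper fractions with denominator <= 8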
| 92
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict, under the already-renamed HF key
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
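
A hedged invocation sketch (the script filename is assumed from its location in the transformers repo):

# python convert_blip_2_original_to_pytorch.py \
#     --model_name blip2-opt-2.7b \
#     --pytorch_dump_folder_path ./blip2-opt-2.7b-converted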
| 362
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unc-nlp/lxmert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
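
A small sketch of the BERT-style special-token layout produced by the helpers above (the integer token ids are placeholders, and loading the tokenizer assumes Hub access):

tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
print(tok.build_inputs_with_special_tokens([5, 6]))           # [CLS] 5 6 [SEP]
print(tok.build_inputs_with_special_tokens([5, 6], [7]))      # [CLS] 5 6 [SEP] 7 [SEP]
print(tok.create_token_type_ids_from_sequences([5, 6], [7]))  # [0, 0, 0, 0, 1, 1]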
| 203
| 0
|
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
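
Sanity checks under the restored names: the Catalan numbers begin 1, 1, 2, 5, 14, and n labeled nodes yield catalan_number(n) * n! binary trees.

print([catalan_number(n) for n in range(5)])  # [1, 1, 2, 5, 14]
print(binary_tree_count(3))                   # 30 == 5 * 3!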
| 121
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`'
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`'
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
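
A minimal, hedged construction of the config above, showing the context_length fallback and the attribute_map indirection:

config = InformerConfig(prediction_length=24, num_time_features=1, d_model=32)
print(config.context_length)  # 24, defaults to prediction_length
print(config.hidden_size)     # 32, resolved through attribute_map to d_model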
| 121
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365
|
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 96
| 0
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false); raise ValueError otherwise."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax array or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def __lowerCAmelCase ( lowercase : str ) -> List[str]:
"""simple docstring"""
if isinstance(lowercase , (dict, UserDict) ):
return {k: to_py_obj(lowercase ) for k, v in obj.items()}
elif isinstance(lowercase , (list, tuple) ):
return [to_py_obj(lowercase ) for o in obj]
elif is_tf_tensor(lowercase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowercase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowercase ):
return np.asarray(lowercase ).tolist()
elif isinstance(lowercase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __lowerCAmelCase ( lowercase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase , (dict, UserDict) ):
return {k: to_numpy(lowercase ) for k, v in obj.items()}
elif isinstance(lowercase , (list, tuple) ):
return np.array(lowercase )
elif is_tf_tensor(lowercase ):
return obj.numpy()
elif is_torch_tensor(lowercase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowercase ):
return np.asarray(lowercase )
else:
return obj
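
def _conversion_demo() -> None:
    # Illustrative sketch, not part of the original module: how `to_py_obj` and
    # `to_numpy` normalise nested containers of framework tensors. The sample
    # data is invented and only exercises the numpy branch.
    nested = {"logits": np.array([[1.0, 2.0]]), "ids": [np.int64(3), np.int64(4)]}
    assert to_py_obj(nested) == {"logits": [[1.0, 2.0]], "ids": [3, 4]}
    assert isinstance(to_numpy(nested)["logits"], np.ndarray)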
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
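
def _model_output_demo() -> None:
    # Illustrative only, not part of the original module: a `ModelOutput`
    # subclass behaves both as a dataclass and as an ordered mapping, and
    # `None` fields are dropped. `DemoOutput` below is invented for the demo.
    import dataclasses

    @dataclasses.dataclass
    class DemoOutput(ModelOutput):
        loss: Any = None
        logits: Any = None

    out = DemoOutput(logits=[1, 2, 3])
    assert out["logits"] == out.logits == out[0]  # integer indexing uses to_tuple()
    assert "loss" not in out  # None fields are skipped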
class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument of a tokenizer call. Useful for tab-completion in an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument of a tokenizer call. Useful for tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper around `contextlib.ExitStack` that enters a list of context managers as a single `with` block.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
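
def _context_managers_demo() -> None:
    # Illustrative usage sketch, not part of the original module: enter a
    # dynamic list of context managers at once; both temporary directories are
    # open inside the `with` body and cleaned up on exit.
    with ContextManagers([tempfile.TemporaryDirectory(), tempfile.TemporaryDirectory()]):
        pass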
def can_return_loss(model_class):
    """
    Check if a given model class can return loss, based on its signature.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the label arguments used by a given model class.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict with delimiter-joined keys."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
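
def _flatten_dict_demo() -> None:
    # Illustrative only, not part of the original module: `flatten_dict`
    # collapses nested mappings into dotted keys. The sample dict is invented.
    nested = {"model": {"hidden_size": 64, "attention": {"num_heads": 8}}}
    assert flatten_dict(nested) == {"model.hidden_size": 64, "model.attention.num_heads": 8}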
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape`.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze`.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims`.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size`.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
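
def _framework_agnostic_ops_demo() -> None:
    # Illustrative only, not part of the original module: the dispatchers above
    # pick the backend from the input type; this demo exercises the numpy branch.
    x = np.zeros((2, 3))
    assert transpose(x).shape == (3, 2)
    assert reshape(x, (3, 2)).shape == (3, 2)
    assert expand_dims(x, 0).shape == (1, 2, 3)
    assert squeeze(expand_dims(x, 0), axis=0).shape == (2, 3)
    assert tensor_size(x) == 6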
def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model class without using isinstance(), because we cannot guarantee that the
    relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 203
|
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case : Dict = 4
snake_case : str = (1 << p) - 1
for _ in range(p - 2 ):
snake_case : Any = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
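    # Illustrative check (not in the original file): among small odd primes,
    # exactly 3, 5, 7 and 13 are Mersenne prime exponents; 11 fails because
    # 2**11 - 1 = 2047 = 23 * 89.
    assert [p for p in (3, 5, 7, 11, 13) if lucas_lehmer_test(p)] == [3, 5, 7, 13]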
| 203
| 1
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Maps each choice's string representation back to the actual value, so one argument can support several value types.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """
    Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`.
    """
    if metadata is None:
        # Important: don't use as default param in function signature because dict is mutable and shared across calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
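
def _choice_type_demo() -> None:
    # Illustrative only, not part of the original module: the converter returned
    # by `make_choice_type_function` maps command-line strings back to choices.
    to_choice = make_choice_type_function([1, 2, "auto"])
    assert to_choice("1") == 1
    assert to_choice("auto") == "auto"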
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """
        Parse command-line args into instances of the specified dataclass types, optionally reading extra arguments
        from an `.args` file.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
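
def _hf_argument_parser_demo() -> None:
    # Minimal usage sketch, illustrative and not part of the original module:
    # parsing a toy dataclass from CLI-style strings. `TrainConfig` is invented.
    @dataclasses.dataclass
    class TrainConfig:
        learning_rate: float = 1e-4
        use_fp16: bool = False

    parser = HfArgumentParser(TrainConfig)
    (config,) = parser.parse_args_into_dataclasses(
        args=["--learning_rate", "3e-5", "--use_fp16"], look_for_args_file=False
    )
    assert config.learning_rate == 3e-5 and config.use_fp16 is True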
| 356
|
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> list:
snake_case_ = length or len(_SCREAMING_SNAKE_CASE )
snake_case_ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
snake_case_ , snake_case_ = list_data[i + 1], list_data[i]
snake_case_ = True
return list_data if not swapped else bubble_sort(_SCREAMING_SNAKE_CASE , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233
| 0
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
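
# Illustrative usage sketch (not part of the original file): `xgboost` returns an
# (n_samples, 1) column vector, which scikit-learn's regression metrics in `main`
# accept alongside 1-D targets. The toy arrays below are invented.
#
#     preds = xgboost(np.random.rand(8, 2), np.random.rand(8), np.random.rand(2, 2))
#     preds.shape  # -> (2, 1)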
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 324
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # single sequence: `[CLS] X [SEP]`; pair of sequences: `[CLS] A [SEP] B [SEP]`
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # 0s for the first sequence (and special tokens around it), 1s for the second
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
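
# Minimal usage sketch (illustrative, not part of the original module); requires
# network access to fetch the pretrained files from the Hugging Face Hub.
#
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     tokenizer("Hello world")["input_ids"]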
| 86
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
| 86
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 195
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 195
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """
    >>> generate_all_combinations(n=4, k=2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
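    # Illustrative check (not in the original file): C(4, 2) yields 6 combinations.
    assert len(total_list) == 6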
| 78
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    """Stack the per-example image tensors into a single batched tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
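# Note (editor addition): the Trainer below passes a list of dataset rows to this
# collator, so for a batch of N examples whose transform emits 3x224x224 tensors the
# result is {"pixel_values": FloatTensor of shape (N, 3, 224, 224)} - the only input
# ViTMAEForPreTraining needs, since MAE pre-training is label-free.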
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the augmentation/normalization pipeline to every image in a batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
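    # Worked example (editor addition): with --per_device_train_batch_size 32 on one
    # device, --gradient_accumulation_steps 4 and a world size of 2, the total batch
    # size is 32 * 4 * 2 = 256, so learning_rate = 1e-3 * 256 / 256 = 1e-3.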
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
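# Example invocation (editor addition; argument values are illustrative only):
#
#   python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss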
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
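# Minimal standalone sketch (editor addition): the same exact-match rate without the
# `datasets` machinery, for plain lists of strings and no normalization options.
def _plain_exact_match_rate(predictions, references):
    matches = [prediction == reference for prediction, reference in zip(predictions, references)]
    return 100.0 * sum(matches) / len(matches)
# e.g. _plain_exact_match_rate(["cat", "dog"], ["cat", "bird"]) == 50.0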
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
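# Usage sketch (editor addition): pytest injects fixtures by parameter name, so a test
# in the same suite can consume the generated script directory like this (the
# assertion below is illustrative only):
#
#     def test_dummy_dataset_script_dir(dataset_loading_script_dir):
#         assert dataset_loading_script_dir.endswith("__dummy_dataset1__")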
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    """
    Check that every submodule is registered in the main init of Transformers.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
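# Editor's note: this checker resolves the relative `src/transformers` path, so it is
# meant to be run from the repository root (in the transformers repo it lives under
# `utils/`), e.g. `python utils/check_inits.py`.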
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
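# Export sketch (editor addition; model id and output path are illustrative). The
# config above plugs into the `transformers.onnx` export helper roughly as follows:
#
#     from pathlib import Path
#     from transformers import AutoTokenizer, MarianMTModel
#     from transformers.onnx import export
#
#     model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))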
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore every cell and track the best square side seen so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with dp_array so each cell is solved only once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and the next row (O(cols) extra space)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: next_row must keep this row's finished values while
        # current_row is overwritten during the next iteration
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
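# Editor's note: despite the "area" in their names, all four variants return the side
# length of the largest all-ones square (2 for the 2x2 example above, whose area is 4).
# The bottom-up version runs in O(rows * cols) time and space; the space-optimized one
# keeps only two rows, i.e. O(cols) extra space, while the memo-free top-down version
# is exponential and kept for comparison only.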
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
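    # Editor's note (illustrative): the unittest classes below hold one instance of this
    # tester and drive it, e.g.
    #     config_and_inputs = self.model_tester.prepare_config_and_inputs()
    #     self.model_tester.create_and_check_model(*config_and_inputs)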
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_cache_conversions(self):
        # Checks that Falcon's RW-format cache round-trips to the standard format and back.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have a different number of KV heads than query heads, so this
        # override checks the cache shapes with the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
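    # A small sketch (not from the original file) that mirrors the head-count branching in
    # the shape checks above: Falcon's multi-query models cache a single shared KV head,
    # while the new decoder architecture caches one KV pair per attention head here.
    def _expected_num_kv_heads(self, config):
        if config.new_decoder_architecture:
            return config.num_attention_heads
        if config.multi_query:
            return 1
        return getattr(config, "num_kv_heads", config.num_attention_heads)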
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)
    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that
        # resemble their architectures but are much smaller
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 132
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> int:
return (pow(snake_case__ , 2 ) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
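

# A hedged sketch (not part of the original script) of how ``pollard_rho`` can be chained
# into a full factorization. ``factorize`` is a hypothetical helper name; any value for
# which no divisor is found within the attempt budget is treated as (probably) prime.
def factorize(value: int) -> list[int]:
    """Recursively split ``value`` into probable prime factors, e.g. factorize(220) -> [2, 2, 5, 11]."""
    if value < 2:
        return []
    divisor = pollard_rho(value)
    if divisor is None:
        # Either ``value`` is prime or we were unlucky with every seed.
        return [value]
    return sorted(factorize(divisor) + factorize(value // divisor))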
if __name__ == "__main__":
import argparse
A_ = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
A_ = parser.parse_args()
A_ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
A_ = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 132
| 1
|