code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None: '''simple docstring''' if start is None: UpperCAmelCase_ = 0 if end is None: UpperCAmelCase_ = len(snake_case_ ) - 1 if start >= end: return UpperCAmelCase_ = (start + end) // 2 slowsort(snake_case_ , snake_case_ , snake_case_ ) slowsort(snake_case_ , mid + 1 , snake_case_ ) if sequence[end] < sequence[mid]: UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end] slowsort(snake_case_ , snake_case_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
1
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoTokenizer.from_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = tokenizer("""This is me""" , return_tensors="""pt""" ) __a = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __a = model.generate(**__lowercase ) __a = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __a = model_reloaded.generate(**__lowercase ) self.assertTrue(torch.allclose(__lowercase , __lowercase ) ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowercase ): model.save_pretrained(__lowercase ) __a = model.reverse_bettertransformer() model.save_pretrained(__lowercase )
302
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Any = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """lxmert""" lowerCAmelCase__ : Tuple = {} def __init__(self : Union[str, Any] , UpperCamelCase : int=30522 , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : Optional[int]=12 , UpperCamelCase : List[Any]=9500 , UpperCamelCase : Tuple=1600 , UpperCamelCase : List[Any]=400 , UpperCamelCase : Dict=3072 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : str=512 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : int=0.02 , UpperCamelCase : str=1E-12 , UpperCamelCase : List[str]=9 , UpperCamelCase : Dict=5 , UpperCamelCase : Optional[int]=5 , UpperCamelCase : str=2048 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Optional[int]=6.67 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : str=True , UpperCamelCase : List[str]=True , **UpperCamelCase : int , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = num_qa_labels lowercase__ = num_object_labels lowercase__ = num_attr_labels lowercase__ = l_layers lowercase__ = x_layers lowercase__ = r_layers lowercase__ = visual_feat_dim lowercase__ = visual_pos_dim lowercase__ = visual_loss_normalizer lowercase__ = task_matched lowercase__ = task_mask_lm lowercase__ = task_obj_predict lowercase__ = task_qa lowercase__ = visual_obj_loss lowercase__ = visual_attr_loss lowercase__ = visual_feat_loss lowercase__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**UpperCamelCase )
2
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] ='albert' def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = embedding_size __a = hidden_size __a = num_hidden_layers __a = num_hidden_groups __a = num_attention_heads __a = inner_group_num __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout_prob __a = position_embedding_type class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
302
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Any = logging.get_logger(__name__) lowercase : int = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class A ( __snake_case ): __magic_name__ = '''falcon''' __magic_name__ = ['''past_key_values'''] def __init__( self , SCREAMING_SNAKE_CASE=65024 , SCREAMING_SNAKE_CASE=4544 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=71 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=11 , SCREAMING_SNAKE_CASE=11 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" A : Tuple = vocab_size # Backward compatibility with n_embed kwarg A : Dict = kwargs.pop('''n_embed''' , SCREAMING_SNAKE_CASE ) A : int = hidden_size if n_embed is None else n_embed A : Dict = num_hidden_layers A : List[Any] = num_attention_heads A : Optional[Any] = layer_norm_epsilon A : List[Any] = initializer_range A : str = use_cache A : Optional[int] = hidden_dropout A : Union[str, Any] = attention_dropout A : Dict = bos_token_id A : List[Any] = eos_token_id A : List[str] = num_attention_heads if num_kv_heads is None else num_kv_heads A : int = alibi A : List[str] = new_decoder_architecture A : Union[str, Any] = multi_query # Ignored when new_decoder_architecture is True A : int = parallel_attn A : Union[str, Any] = bias super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return not self.alibi
3
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
'''simple docstring''' from maths.prime_check import is_prime def a_ ( lowerCamelCase : int ): if not isinstance(lowerCamelCase , lowerCamelCase ): lowerCAmelCase = f'''Input value of [number={number}] must be an integer''' raise TypeError(lowerCamelCase ) if is_prime(lowerCamelCase ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
4
class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = val __a = None __a = None def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: __a = Node(__lowercase ) else: self.left.insert(__lowercase ) elif val > self.val: if self.right is None: __a = Node(__lowercase ) else: self.right.insert(__lowercase ) else: __a = val def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if root: inorder(root.left , _SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , _SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return arr __a = Node(arr[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. __a = [] inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
302
0
from __future__ import annotations def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" _lowercase =len(__snake_case ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(__snake_case ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __snake_case , __snake_case , ) def UpperCAmelCase_ ( __snake_case ) -> None: """simple docstring""" _lowercase =[] depth_first_search([] , [] , [] , __snake_case , __snake_case ) # Print all the boards for board in boards: for column in board: print(__snake_case ) print('''''' ) print(len(__snake_case ) , '''solutions were found.''' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
5
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) ) class SCREAMING_SNAKE_CASE : def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = make_divisible(512 * width_multiplier , divisor=8 ) __a = hidden_act __a = conv_kernel_size __a = output_stride __a = classifier_dropout_prob __a = use_labels __a = is_training __a = num_labels __a = initializer_range __a = scope __a = width_multiplier __a = ffn_dropout __a = attn_dropout def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ): '''simple docstring''' __a = MobileViTVaModel(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[Any] =( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __lowerCamelCase : Any =( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCamelCase : Dict =False __lowerCamelCase : Optional[Any] =False __lowerCamelCase : int =False __lowerCamelCase : Any =False def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = MobileViTVaModelTester(self ) __a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def UpperCamelCase_ ( self : int ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__lowercase ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ): __a = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __a = outputs.hidden_states __a = 5 self.assertEqual(len(__lowercase ) , __lowercase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __a = 2 for i in range(len(__lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = MobileViTVaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase__ ( ): """simple docstring""" __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( __lowercase ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowercase ) __a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits # verify the logits __a = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) __a = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits.detach().cpu() __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) __a = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase ) __a = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
302
0
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __A: @staticmethod def SCREAMING_SNAKE_CASE_ ( *_snake_case , **_snake_case ) -> Optional[int]: '''simple docstring''' pass def __lowerCAmelCase ( a__ ) -> str: __a = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __A( unittest.TestCase ): snake_case_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = DepthEstimationPipeline(model=_snake_case , image_processor=_snake_case ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _snake_case ) import datasets __a = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) __a = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , _snake_case , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' pass @slow @require_torch def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = '''Intel/dpt-large''' __a = pipeline('''depth-estimation''' , model=_snake_case ) __a = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) __a = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
6
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
302
0
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=1024 ) -> Union[str, Any]: '''simple docstring''' A__ , A__ = [], [] A__ = list(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) A__ , A__ = sorted_examples[0] def is_too_big(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tok(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): A__ = new_src + ' ' + src A__ = new_tgt + ' ' + tgt if is_too_big(SCREAMING_SNAKE_CASE__ ) or is_too_big(SCREAMING_SNAKE_CASE__ ): # cant fit, finalize example finished_src.append(SCREAMING_SNAKE_CASE__ ) finished_tgt.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = src, tgt else: # can fit, keep adding A__ , A__ = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(SCREAMING_SNAKE_CASE__ ) finished_tgt.append(SCREAMING_SNAKE_CASE__ ) return finished_src, finished_tgt def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: '''simple docstring''' A__ = Path(SCREAMING_SNAKE_CASE__ ) save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) for split in ["train"]: A__ , A__ = data_dir / f'{split}.source', data_dir / f'{split}.target' A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()] A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()] A__ , A__ = pack_examples(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print(f'packed {split} split from {len(SCREAMING_SNAKE_CASE__ )} examples -> {len(SCREAMING_SNAKE_CASE__ )}.' ) Path(save_path / f'{split}.source' ).open('w' ).write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) Path(save_path / f'{split}.target' ).open('w' ).write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) for split in ["val", "test"]: A__ , A__ = data_dir / f'{split}.source', data_dir / f'{split}.target' shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'{split}.source' ) shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'{split}.target' ) def _snake_case( ) -> Optional[int]: '''simple docstring''' A__ = argparse.ArgumentParser() parser.add_argument('--tok_name' , type=SCREAMING_SNAKE_CASE__ , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('--max_seq_len' , type=SCREAMING_SNAKE_CASE__ , default=128 ) parser.add_argument('--data_dir' , type=SCREAMING_SNAKE_CASE__ ) parser.add_argument('--save_path' , type=SCREAMING_SNAKE_CASE__ ) A__ = parser.parse_args() A__ = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(SCREAMING_SNAKE_CASE__ , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
7
import string import numpy def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : __lowerCamelCase : List[str] =string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase : List[Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 ) __lowerCamelCase : Optional[Any] =numpy.vectorize(lowerCamelCase__ ) def __init__( self : Union[str, Any] , __lowercase : numpy.ndarray ): '''simple docstring''' __a = self.modulus(__lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __a = encrypt_key.shape[0] def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' return self.key_string.index(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : int ): '''simple docstring''' return self.key_string[round(__lowercase )] def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = len(self.key_string ) if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1: __a = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' __a = [char for char in text.upper() if char in self.key_string] __a = chars[-1] while len(__lowercase ) % self.break_key != 0: chars.append(__lowercase ) return "".join(__lowercase ) def UpperCamelCase_ ( self : List[str] , __lowercase : str ): '''simple docstring''' __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[ 0 ] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __a = i break __a = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowercase ) ) def UpperCamelCase_ ( self : Any , __lowercase : str ): '''simple docstring''' __a = self.make_decrypt_key() __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase__ ( ): """simple docstring""" __a = int(input("""Enter the order of the encryption key: """ ) ) __a = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(_SCREAMING_SNAKE_CASE ): __a = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) __a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) __a = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": __a = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": __a = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = '''bert-base-cased''' lowerCAmelCase_ = '''google/pegasus-xsum''' lowerCAmelCase_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCAmelCase_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCAmelCase_ = '''patrickvonplaten/t5-tiny-random''' lowerCAmelCase_ = '''sshleifer/bart-tiny-random''' lowerCAmelCase_ = '''sshleifer/tiny-mbart''' lowerCAmelCase_ = '''sshleifer/tiny-marian-en-de''' def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = '''\n'''.join(SCREAMING_SNAKE_CASE__ ) Path(SCREAMING_SNAKE_CASE__ ).open('''w''' ).writelines(SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}.source''' ) , SCREAMING_SNAKE_CASE__ ) _dump_articles(os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}.target''' ) , SCREAMING_SNAKE_CASE__ ) return tmp_dir class snake_case_ ( __A ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def snake_case__( self : List[str] , _UpperCamelCase : Optional[Any] ) ->int: snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) snake_case_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in ARTICLES ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in SUMMARIES ) snake_case_ = 4 snake_case_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated snake_case_, snake_case_ = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=_UpperCamelCase , max_target_length=_UpperCamelCase , src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place snake_case_ = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def snake_case__( self : int , _UpperCamelCase : Union[str, Any] ) ->Any: snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) snake_case_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in ARTICLES ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in SUMMARIES ) snake_case_ = 4 snake_case_ = LegacySeqaSeqDataset( _UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=2_0 , max_target_length=_UpperCamelCase , ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def snake_case__( self : List[Any] ) ->List[Any]: snake_case_ = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) snake_case_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) snake_case_ = tmp_dir.joinpath('''train.source''' ).open().readlines() snake_case_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_UpperCamelCase , _UpperCamelCase , 1_2_8 , _UpperCamelCase ) snake_case_ = {x.name for x in tmp_dir.iterdir()} snake_case_ = {x.name for x in save_dir.iterdir()} snake_case_ = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_UpperCamelCase ) < len(_UpperCamelCase ) assert len(_UpperCamelCase ) == 1 assert len(packed_examples[0] ) == sum(len(_UpperCamelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def snake_case__( self : Union[str, Any] ) ->Optional[Any]: if not FAIRSEQ_AVAILABLE: return snake_case_, snake_case_, snake_case_ = self._get_dataset(max_len=6_4 ) snake_case_ = 6_4 snake_case_ = ds.make_dynamic_sampler(_UpperCamelCase , required_batch_size_multiple=_UpperCamelCase ) snake_case_ = [len(_UpperCamelCase ) for x in batch_sampler] assert len(set(_UpperCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_UpperCamelCase ) == len(_UpperCamelCase ) # no dropped or added examples snake_case_ = DataLoader(_UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) snake_case_ = [] snake_case_ = [] for batch in data_loader: snake_case_ = batch['''input_ids'''].shape snake_case_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple snake_case_ = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(_UpperCamelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(_UpperCamelCase ) assert num_src_per_batch[0] == max(_UpperCamelCase ) if failures: raise AssertionError(f'''too many tokens in {len(_UpperCamelCase )} batches''' ) def snake_case__( self : List[str] ) ->int: snake_case_, snake_case_, snake_case_ = self._get_dataset(max_len=5_1_2 ) snake_case_ = 2 snake_case_ = ds.make_sortish_sampler(_UpperCamelCase , shuffle=_UpperCamelCase ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_UpperCamelCase ) snake_case_ = tokenizer.pad_token_id def count_pad_tokens(_UpperCamelCase : str , _UpperCamelCase : List[str]="input_ids" ): return [batch[k].eq(_UpperCamelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_UpperCamelCase , k='''labels''' ) ) < sum(count_pad_tokens(_UpperCamelCase , k='''labels''' ) ) assert sum(count_pad_tokens(_UpperCamelCase ) ) < sum(count_pad_tokens(_UpperCamelCase ) ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any]=1_0_0_0 , _UpperCamelCase : List[Any]=1_2_8 ) ->List[Any]: if os.getenv('''USE_REAL_DATA''' , _UpperCamelCase ): snake_case_ = '''examples/seq2seq/wmt_en_ro''' snake_case_ = max_len * 2 * 6_4 if not Path(_UpperCamelCase ).joinpath('''train.len''' ).exists(): save_len_file(_UpperCamelCase , _UpperCamelCase ) else: snake_case_ = '''examples/seq2seq/test_data/wmt_en_ro''' snake_case_ = max_len * 4 save_len_file(_UpperCamelCase , _UpperCamelCase ) snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=_UpperCamelCase , max_target_length=_UpperCamelCase , n_obs=_UpperCamelCase , ) return ds, max_tokens, tokenizer def snake_case__( self : Union[str, Any] ) ->Optional[int]: snake_case_, snake_case_, snake_case_ = self._get_dataset() snake_case_ = set(DistributedSortishSampler(_UpperCamelCase , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=_UpperCamelCase ) ) snake_case_ = set(DistributedSortishSampler(_UpperCamelCase , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=_UpperCamelCase ) ) assert idsa.intersection(_UpperCamelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def snake_case__( self : List[str] , _UpperCamelCase : Tuple ) ->Optional[Any]: snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase , use_fast=_UpperCamelCase ) if tok_name == MBART_TINY: snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) snake_case_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) snake_case_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_UpperCamelCase ) == 1 if tok_name == BART_TINY else len(_UpperCamelCase ) == 0
8
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] ='autoformer' __lowerCamelCase : str ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ): '''simple docstring''' # time series specific configuration __a = prediction_length __a = context_length if context_length is not None else prediction_length __a = distribution_output __a = loss __a = input_size __a = num_time_features __a = lags_sequence __a = scaling __a = num_dynamic_real_features __a = num_static_real_features __a = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) __a = cardinality else: __a = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) __a = embedding_dimension else: __a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __a = num_parallel_samples # Transformer architecture configuration __a = input_size * len(self.lags_sequence ) + self._number_of_features __a = d_model __a = encoder_attention_heads __a = decoder_attention_heads __a = encoder_ffn_dim __a = decoder_ffn_dim __a = encoder_layers __a = decoder_layers __a = dropout __a = attention_dropout __a = activation_dropout __a = encoder_layerdrop __a = decoder_layerdrop __a = activation_function __a = init_std __a = use_cache # Autoformer __a = label_length __a = moving_average __a = autocorrelation_factor super().__init__(is_encoder_decoder=__lowercase , **__lowercase ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
302
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : str ={'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] =[ 'MRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MraForMaskedLM', 'MraForMultipleChoice', 'MraForQuestionAnswering', 'MraForSequenceClassification', 'MraForTokenClassification', 'MraLayer', 'MraModel', 'MraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
9
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
from __future__ import annotations def lowerCAmelCase_ ( __a , __a , __a , __a ) -> list: """simple docstring""" lowerCamelCase__: Any =[] lowerCamelCase__ , lowerCamelCase__: Any =input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) lowerCamelCase__: str =result + left + right return input_list def lowerCAmelCase_ ( __a ) -> list: """simple docstring""" if len(__a ) <= 1: return input_list lowerCamelCase__: Any =list(__a ) # iteration for two-way merging lowerCamelCase__: str =2 while p <= len(__a ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(__a ) , __a ): lowerCamelCase__: Dict =i lowerCamelCase__: List[str] =i + p - 1 lowerCamelCase__: int =(low + high + 1) // 2 lowerCamelCase__: Optional[int] =merge(__a , __a , __a , __a ) # final merge of last two parts if p * 2 >= len(__a ): lowerCamelCase__: List[Any] =i lowerCamelCase__: Optional[int] =merge(__a , 0 , __a , len(__a ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __A = input("Enter numbers separated by a comma:\n").strip() if user_input == "": __A = [] else: __A = [int(item.strip()) for item in user_input.split(",")] print(iter_merge_sort(unsorted))
10
from __future__ import annotations lowerCamelCase__ = """#""" class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] ): '''simple docstring''' __a = {} def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ): '''simple docstring''' __a = self._trie for char in text: if char not in trie: __a = {} __a = trie[char] __a = True def UpperCamelCase_ ( self : Tuple , __lowercase : str ): '''simple docstring''' __a = self._trie for char in prefix: if char in trie: __a = trie[char] else: return [] return self._elements(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ): '''simple docstring''' __a = [] for c, v in d.items(): __a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )] result.extend(__lowercase ) return tuple(__lowercase ) lowerCamelCase__ = Trie() lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" __a = trie.find_word(_SCREAMING_SNAKE_CASE ) return tuple(string + word for word in suffixes ) def lowerCAmelCase__ ( ): """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('T') def _UpperCAmelCase (UpperCamelCase__ : int ): return (position - 1) // 2 def _UpperCAmelCase (UpperCamelCase__ : int ): return (2 * position) + 1 def _UpperCAmelCase (UpperCamelCase__ : int ): return (2 * position) + 2 class lowerCAmelCase__ ( Generic[T]): '''simple docstring''' def __init__( self) -> None: _A : list[tuple[T, int]] = [] _A : dict[T, int] = {} _A : int = 0 def __len__( self) -> int: return self.elements def __repr__( self) -> str: return str(self.heap) def _lowerCamelCase ( self) -> bool: # Check if the priority queue is empty return self.elements == 0 def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight)) _A : int = self.elements self.elements += 1 self._bubble_up(__lowerCamelCase) def _lowerCamelCase ( self) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1) _A , _A : Optional[Any] = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A : str = self.heap[0] self._bubble_down(__lowerCamelCase) return elem def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> None: # Update the weight of the given key _A : Optional[Any] = self.position_map[elem] _A : Any = (elem, weight) if position > 0: _A : Any = get_parent_position(__lowerCamelCase) _A , _A : Tuple = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCamelCase) else: self._bubble_down(__lowerCamelCase) else: self._bubble_down(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A : Optional[int] = self.position_map[elem] if curr_pos == 0: return None _A : Optional[Any] = get_parent_position(__lowerCamelCase) _A , _A : Any = self.heap[curr_pos] _A , _A : str = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase) return self._bubble_up(__lowerCamelCase) return None def _lowerCamelCase ( self , __lowerCamelCase) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A : Any = self.position_map[elem] _A , _A : Optional[int] = self.heap[curr_pos] _A : Optional[int] = get_child_left_position(__lowerCamelCase) _A : List[str] = get_child_right_position(__lowerCamelCase) if child_left_position < self.elements and child_right_position < self.elements: _A , _A : str = self.heap[child_left_position] _A , _A : List[str] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase) return self._bubble_down(__lowerCamelCase) if child_left_position < self.elements: _A , _A : int = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase) return self._bubble_down(__lowerCamelCase) else: return None if child_right_position < self.elements: _A , _A : int = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase) return self._bubble_down(__lowerCamelCase) return None def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> None: # Swap the nodes at the given positions _A : str = self.heap[nodea_pos][0] _A : List[Any] = self.heap[nodea_pos][0] _A , _A : Dict = ( self.heap[nodea_pos], self.heap[nodea_pos], ) _A : Tuple = nodea_pos _A : int = nodea_pos class lowerCAmelCase__ ( Generic[T]): '''simple docstring''' def __init__( self) -> None: _A : dict[T, dict[T, int]] = {} _A : int = 0 def __repr__( self) -> str: return str(self.connections) def __len__( self) -> int: return self.nodes def _lowerCamelCase ( self , __lowerCamelCase) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A : Optional[int] = {} self.nodes += 1 def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCamelCase) self.add_node(__lowerCamelCase) _A : Optional[int] = weight _A : str = weight def _UpperCAmelCase (UpperCamelCase__ : GraphUndirectedWeighted[T] , ): _A : dict[T, int] = {node: maxsize for node in graph.connections} _A : dict[T, T | None] = {node: None for node in graph.connections} _A : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(UpperCamelCase__ , UpperCamelCase__ ) if priority_queue.is_empty(): return dist, parent # initialization _A : List[str] = priority_queue.extract_min() _A : Tuple = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A : Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(UpperCamelCase__ , dist[neighbour] ) _A : int = node # running prim's algorithm while not priority_queue.is_empty(): _A : Optional[Any] = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A : Optional[Any] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(UpperCamelCase__ , dist[neighbour] ) _A : Any = node return dist, parent
11
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = num_attention_heads __a = attention_head_dim __a = num_attention_heads * attention_head_dim __a = additional_embeddings __a = time_embed_dim or inner_dim __a = embedding_proj_dim or embedding_dim __a = clip_embed_dim or embedding_dim __a = Timesteps(__lowercase , __lowercase , 0 ) __a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) if embedding_proj_norm_type is None: __a = None elif embedding_proj_norm_type == "layer": __a = nn.LayerNorm(__lowercase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) __a = nn.Linear(__lowercase , __lowercase ) if encoder_hid_proj_type is None: __a = None elif encoder_hid_proj_type == "linear": __a = nn.Linear(__lowercase , __lowercase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) __a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) ) if added_emb_type == "prd": __a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) ) elif added_emb_type is None: __a = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) __a = nn.ModuleList( [ BasicTransformerBlock( __lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , ) for d in range(__lowercase ) ] ) if norm_in_type == "layer": __a = nn.LayerNorm(__lowercase ) elif norm_in_type is None: __a = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) __a = nn.LayerNorm(__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) __a = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) __a = causal_attention_mask[None, ...] self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = {} def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ): if hasattr(__lowercase , """set_processor""" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowercase , __lowercase , __lowercase ) return processors def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __a = len(self.attn_processors.keys() ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ): if hasattr(__lowercase , """set_processor""" ): if not isinstance(__lowercase , __lowercase ): module.set_processor(__lowercase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ): '''simple docstring''' __a = hidden_states.shape[0] __a = timestep if not torch.is_tensor(__lowercase ): __a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0: __a = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device ) __a = self.time_proj(__lowercase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. __a = timesteps_projected.to(dtype=self.dtype ) __a = self.time_embedding(__lowercase ) if self.embedding_proj_norm is not None: __a = self.embedding_proj_norm(__lowercase ) __a = self.embedding_proj(__lowercase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __a = self.encoder_hidden_states_proj(__lowercase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) __a = self.proj_in(__lowercase ) __a = self.positional_embedding.to(hidden_states.dtype ) __a = [] __a = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowercase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __a = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __a = hidden_states[:, None, :] __a = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 ) additional_embeds.append(__lowercase ) __a = torch.cat( __lowercase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __a = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __a = F.pad( __lowercase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __a = hidden_states + positional_embeddings if attention_mask is not None: __a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 __a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 ) __a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __a = self.norm_in(__lowercase ) for block in self.transformer_blocks: __a = block(__lowercase , attention_mask=__lowercase ) __a = self.norm_out(__lowercase ) if self.prd_embedding is not None: __a = hidden_states[:, -1] else: __a = hidden_states[:, additional_embeddings_len:] __a = self.proj_to_clip_embeddings(__lowercase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple ): '''simple docstring''' __a = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
302
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase_ = { 'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'], 'configuration_maskformer_swin': ['MaskFormerSwinConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['MaskFormerFeatureExtractor'] UpperCAmelCase_ = ['MaskFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'MaskFormerForInstanceSegmentation', 'MaskFormerModel', 'MaskFormerPreTrainedModel', ] UpperCAmelCase_ = [ 'MaskFormerSwinBackbone', 'MaskFormerSwinModel', 'MaskFormerSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
12
from functools import lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 __a = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_SCREAMING_SNAKE_CASE ) if n > 1: factors.add(_SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ): """simple docstring""" return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 while True: # Increment each value of a generated range __a = [base + i for i in range(_SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. __a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group] checker.append(_SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(_SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ): """simple docstring""" __a = run(_SCREAMING_SNAKE_CASE ) return results[0] if len(_SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
302
0
def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = generate_pascal_triangle(_UpperCAmelCase ) for row_idx in range(_UpperCAmelCase ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) SCREAMING_SNAKE_CASE_: list[list[int]] = [] for current_row_idx in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = populate_current_row(_UpperCAmelCase , _UpperCAmelCase ) triangle.append(_UpperCAmelCase ) return triangle def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = 1, 1 for current_col_idx in range(1 , _UpperCAmelCase ): calculate_current_element( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return current_row def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): SCREAMING_SNAKE_CASE_: str = triangle[current_row_idx - 1][current_col_idx - 1] SCREAMING_SNAKE_CASE_: Optional[int] = triangle[current_row_idx - 1][current_col_idx] SCREAMING_SNAKE_CASE_: str = above_to_left_elt + above_to_right_elt def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) SCREAMING_SNAKE_CASE_: list[list[int]] = [[1]] for row_index in range(1 , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = [0] + result[-1] + [0] SCREAMING_SNAKE_CASE_: Tuple = row_index + 1 # Calculate the number of distinct elements in a row SCREAMING_SNAKE_CASE_: Any = sum(divmod(_UpperCAmelCase , 2 ) ) SCREAMING_SNAKE_CASE_: Optional[int] = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] SCREAMING_SNAKE_CASE_: Tuple = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() SCREAMING_SNAKE_CASE_: List[str] = row_first_half + row_second_half result.append(_UpperCAmelCase ) return result def A_ ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) -> None: SCREAMING_SNAKE_CASE_: int = f"{func.__name__}({value})" SCREAMING_SNAKE_CASE_: List[str] = timeit(f"__main__.{call}" , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(f"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
13
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" __a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __a = 128 elif "12-12" in model_name: __a = 12 __a = 12 elif "14-14" in model_name: __a = 14 __a = 14 elif "16-16" in model_name: __a = 16 __a = 16 else: raise ValueError("""Model not supported""" ) __a = """huggingface/label-files""" if "speech-commands" in model_name: __a = 35 __a = """speech-commands-v2-id2label.json""" else: __a = 527 __a = """audioset-id2label.json""" __a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) __a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if "module.v" in name: __a = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: __a = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: __a = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: __a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __a = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: __a = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __a = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __a = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __a = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __a = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __a = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: __a = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): __a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: __a = key.split(""".""" ) __a = int(key_split[3] ) __a = config.hidden_size if "weight" in key: __a = val[:dim, :] __a = val[dim : dim * 2, :] __a = val[-dim:, :] else: __a = val[:dim] __a = val[dim : dim * 2] __a = val[-dim:] else: __a = val return orig_state_dict def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ): """simple docstring""" __a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE ) __a = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __a = model_name_to_url[model_name] __a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # remove some keys remove_keys(_SCREAMING_SNAKE_CASE ) # rename some keys __a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load 🤗 model __a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978 __a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526 __a = 1024 if """speech-commands""" not in model_name else 128 __a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) if "speech-commands" in model_name: __a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) __a = dataset[0]["""audio"""]["""array"""] else: __a = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) __a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE ) __a = waveform.squeeze().numpy() __a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" ) # forward pass __a = model(**_SCREAMING_SNAKE_CASE ) __a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __a = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __a = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __a = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __a = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __a = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __a = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __a = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __a = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f"MIT/{model_name}" ) feature_extractor.push_to_hub(f"MIT/{model_name}" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
302
0
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list: """simple docstring""" A__ = len(lowercase_ ) A__ = [] for i in range(len(lowercase_ ) - pat_len + 1 ): A__ = True for j in range(lowercase_ ): if s[i + j] != pattern[j]: A__ = False break if match_found: position.append(lowercase_ ) return position if __name__ == "__main__": assert naive_pattern_search("""ABCDEFG""", """DE""") == [3] print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
14
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } lowerCamelCase__ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } lowerCamelCase__ = """▁""" class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] =VOCAB_FILES_NAMES __lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any =AlbertTokenizer def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. __a = ( AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token ) super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , ) __a = do_lower_case __a = remove_space __a = keep_accents __a = vocab_file __a = False if not self.vocab_file else True def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __a = os.path.join( __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
302
0
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean SCREAMING_SNAKE_CASE :Dict = 0 SCREAMING_SNAKE_CASE :Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] SCREAMING_SNAKE_CASE :Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right SCREAMING_SNAKE_CASE :Optional[int] = tuple[int, int] class UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,A : int ,A : int ,A : int ,A : int ,A : int ,A : Node | None ,): __A = pos_x __A = pos_y __A = (pos_y, pos_x) __A = goal_x __A = goal_y __A = g_cost __A = parent __A = self.calculate_heuristic() __A = self.g_cost + self.h_cost def UpperCamelCase_ ( self : Optional[int] ): __A = self.pos_x - self.goal_x __A = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(A ) + abs(A ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Union[str, Any] ,A : Node ): return self.f_cost < other.f_cost class UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] ,A : TPosition ,A : TPosition ): __A = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,A ) __A = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_99_99 ,A ) __A = [self.start] __A = [] __A = False def UpperCamelCase_ ( self : Dict ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __A = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(A ) self.closed_nodes.append(A ) __A = self.get_successors(A ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(A ) else: # retrieve the best current path __A = self.open_nodes.pop(self.open_nodes.index(A ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(A ) else: self.open_nodes.append(A ) return [self.start.pos] def UpperCamelCase_ ( self : List[str] ,A : Node ): __A = [] for action in delta: __A = parent.pos_x + action[1] __A = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( A ,A ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,A ,) ) return successors def UpperCamelCase_ ( self : int ,A : Node | None ): __A = node __A = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __A = current_node.parent path.reverse() return path class UpperCAmelCase : '''simple docstring''' def __init__( self : List[str] ,A : TPosition ,A : TPosition ): __A = AStar(A ,A ) __A = AStar(A ,A ) __A = False def UpperCamelCase_ ( self : List[str] ): while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() __A = self.fwd_astar.open_nodes.pop(0 ) __A = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( A ,A ) self.fwd_astar.closed_nodes.append(A ) self.bwd_astar.closed_nodes.append(A ) __A = current_bwd_node __A = current_fwd_node __A = { self.fwd_astar: self.fwd_astar.get_successors(A ), self.bwd_astar: self.bwd_astar.get_successors(A ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(A ) else: # retrieve the best current path __A = astar.open_nodes.pop( astar.open_nodes.index(A ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(A ) else: astar.open_nodes.append(A ) return [self.fwd_astar.start.pos] def UpperCamelCase_ ( self : Dict ,A : Node ,A : Node ): __A = self.fwd_astar.retrace_path(A ) __A = self.bwd_astar.retrace_path(A ) bwd_path.pop() bwd_path.reverse() __A = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] SCREAMING_SNAKE_CASE :List[Any] = (0, 0) SCREAMING_SNAKE_CASE :int = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) SCREAMING_SNAKE_CASE :Dict = time.time() SCREAMING_SNAKE_CASE :Union[str, Any] = AStar(init, goal) SCREAMING_SNAKE_CASE :Union[str, Any] = a_star.search() SCREAMING_SNAKE_CASE :Optional[int] = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') SCREAMING_SNAKE_CASE :Tuple = time.time() SCREAMING_SNAKE_CASE :List[Any] = BidirectionalAStar(init, goal) SCREAMING_SNAKE_CASE :str = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
15
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] =(IPNDMScheduler,) __lowerCamelCase : int =(('num_inference_steps', 50),) def UpperCamelCase_ ( self : str , **__lowercase : Dict ): '''simple docstring''' __a = {"""num_train_timesteps""": 1000} config.update(**__lowercase ) return config def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : str ): '''simple docstring''' pass def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals (must be after setting timesteps) __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residual (must be after setting timesteps) __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__lowercase ) for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample return sample def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) __a = self.dummy_sample __a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ): scheduler.set_timesteps(__lowercase ) elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ): __a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __a = dummy_past_residuals[:] __a = scheduler.timesteps[5] __a = scheduler.timesteps[6] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' __a = self.full_loop() __a = torch.mean(torch.abs(__lowercase ) ) assert abs(result_mean.item() - 2540529 ) < 10
302
0
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '▁' lowerCAmelCase_ = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } lowerCAmelCase_ = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } lowerCAmelCase_ = { 'facebook/s2t-small-librispeech-asr': 1_024, } lowerCAmelCase_ = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] lowerCAmelCase_ = {'mustc': MUSTC_LANGS} class __A ( A_ ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : int = MAX_MODEL_INPUT_SIZES lowerCAmelCase : Optional[int] = ["input_ids", "attention_mask"] lowerCAmelCase : List[int] = [] def __init__( self : Optional[Any] ,_snake_case : int ,_snake_case : Optional[int] ,_snake_case : Optional[int]="<s>" ,_snake_case : Union[str, Any]="</s>" ,_snake_case : Any="<pad>" ,_snake_case : Dict="<unk>" ,_snake_case : Optional[Any]=False ,_snake_case : int=False ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : int ,) -> None: """simple docstring""" lowercase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,pad_token=_snake_case ,do_upper_case=_snake_case ,do_lower_case=_snake_case ,tgt_lang=_snake_case ,lang_codes=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) lowercase__ : str = do_upper_case lowercase__ : List[str] = do_lower_case lowercase__ : Any = load_json(_snake_case ) lowercase__ : Dict = {v: k for k, v in self.encoder.items()} lowercase__ : Dict = spm_file lowercase__ : List[str] = load_spm(_snake_case ,self.sp_model_kwargs ) if lang_codes is not None: lowercase__ : Optional[Any] = lang_codes lowercase__ : Any = LANGUAGES[lang_codes] lowercase__ : Optional[int] = [f"""<lang:{lang}>""" for lang in self.langs] lowercase__ : Union[str, Any] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs} lowercase__ : str = self.lang_tokens lowercase__ : Optional[int] = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: lowercase__ : Dict = {} @property def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" return len(self.encoder ) @property def UpperCAmelCase ( self : str ) -> str: """simple docstring""" return self._tgt_lang @tgt_lang.setter def UpperCAmelCase ( self : Optional[int] ,_snake_case : List[Any] ) -> None: """simple docstring""" lowercase__ : Optional[Any] = new_tgt_lang self.set_tgt_lang_special_tokens(_snake_case ) def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ) -> None: """simple docstring""" lowercase__ : List[Any] = self.lang_code_to_id[tgt_lang] lowercase__ : str = [lang_code_id] def UpperCAmelCase ( self : Optional[Any] ,_snake_case : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCAmelCase ( self : Dict ,_snake_case : Union[str, Any] ) -> Tuple: """simple docstring""" return self.encoder.get(_snake_case ,self.encoder[self.unk_token] ) def UpperCAmelCase ( self : List[str] ,_snake_case : int ) -> str: """simple docstring""" return self.decoder.get(_snake_case ,self.unk_token ) def UpperCAmelCase ( self : List[Any] ,_snake_case : List[str] ) -> str: """simple docstring""" lowercase__ : str = [] lowercase__ : List[str] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: lowercase__ : int = self.sp_model.decode(_snake_case ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " lowercase__ : str = [] else: current_sub_tokens.append(_snake_case ) lowercase__ : Tuple = self.sp_model.decode(_snake_case ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def UpperCAmelCase ( self : str ,_snake_case : Tuple ,_snake_case : Any=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Dict ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) lowercase__ : List[Any] = [1] * len(self.prefix_tokens ) lowercase__ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_snake_case )) + suffix_ones return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Any = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ) -> Dict: """simple docstring""" lowercase__ : int = self.__dict__.copy() lowercase__ : Dict = None return state def __setstate__( self : Union[str, Any] ,_snake_case : Dict ) -> None: """simple docstring""" lowercase__ : Optional[int] = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowercase__ : int = {} lowercase__ : List[Any] = load_spm(self.spm_file ,self.sp_model_kwargs ) def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" lowercase__ : Tuple = Path(_snake_case ) assert save_dir.is_dir(), f"""{save_directory} should be a directory""" lowercase__ : List[Any] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) lowercase__ : Optional[Any] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder ,_snake_case ) if os.path.abspath(self.spm_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file ,_snake_case ) elif not os.path.isfile(self.spm_file ): with open(_snake_case ,'''wb''' ) as fi: lowercase__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (str(_snake_case ), str(_snake_case )) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> sentencepiece.SentencePieceProcessor: lowercase__ : Any = sentencepiece.SentencePieceProcessor(**__lowerCamelCase ) spm.Load(str(__lowerCamelCase ) ) return spm def __UpperCAmelCase ( __lowerCamelCase ) -> Union[Dict, List]: with open(__lowerCamelCase , '''r''' ) as f: return json.load(__lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> None: with open(__lowerCamelCase , '''w''' ) as f: json.dump(__lowerCamelCase , __lowerCamelCase , indent=2 )
16
from __future__ import annotations lowerCamelCase__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ): '''simple docstring''' __a = graph # mapping node to its parent in resulting breadth first tree __a = {} __a = source_vertex def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = {self.source_vertex} __a = None __a = [self.source_vertex] # first in first out queue while queue: __a = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__lowercase ) __a = vertex queue.append(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex __a = self.parent.get(__lowercase ) if target_vertex_parent is None: __a = ( F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(__lowercase ) return self.shortest_path(__lowercase ) + F"->{target_vertex}" if __name__ == "__main__": lowerCamelCase__ = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
302
0
"""simple docstring""" _a = 6_55_21 def _A ( UpperCamelCase_ : str) -> int: '''simple docstring''' __lowercase = 1 __lowercase = 0 for plain_chr in plain_text: __lowercase = (a + ord(UpperCamelCase_)) % MOD_ADLER __lowercase = (b + a) % MOD_ADLER return (b << 16) | a
17
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple =KandinskyVaaPriorPipeline __lowerCamelCase : Union[str, Any] =['prompt'] __lowerCamelCase : Any =['prompt', 'negative_prompt'] __lowerCamelCase : List[str] =[ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] __lowerCamelCase : List[Any] =False @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : Any ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return 100 @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__lowercase ) @property def UpperCamelCase_ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) __a = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } __a = PriorTransformer(**__lowercase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __a = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __a = CLIPVisionModelWithProjection(__lowercase ) return model @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = CLIPImageProcessor( crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , ) return image_processor def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.dummy_prior __a = self.dummy_image_encoder __a = self.dummy_text_encoder __a = self.dummy_tokenizer __a = self.dummy_image_processor __a = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , ) __a = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ): '''simple docstring''' if str(__lowercase ).startswith("""mps""" ): __a = torch.manual_seed(__lowercase ) else: __a = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __a = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = """cpu""" __a = self.get_dummy_components() __a = self.pipeline_class(**__lowercase ) __a = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __a = pipe(**self.get_dummy_inputs(__lowercase ) ) __a = output.image_embeds __a = pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] __a = image[0, -10:] __a = image_from_tuple[0, -10:] assert image.shape == (1, 32) __a = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = torch_device == """cpu""" __a = True __a = False self._test_inference_batch_single_identical( test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , ) @skip_mps def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = torch_device == """cpu""" __a = False self._test_attention_slicing_forward_pass( test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
302
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCamelCase : List[str] = { '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ '''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GraphormerForGraphClassification''', '''GraphormerModel''', '''GraphormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
18
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = Dict[str, Any] lowerCamelCase__ = List[Prediction] @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ): '''simple docstring''' super().__init__(*__lowercase , **__lowercase ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ): '''simple docstring''' __a = {} if "threshold" in kwargs: __a = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ): '''simple docstring''' return super().__call__(*__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : Tuple ): '''simple docstring''' __a = load_image(__lowercase ) __a = torch.IntTensor([[image.height, image.width]] ) __a = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: __a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) __a = target_size return inputs def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ): '''simple docstring''' __a = model_inputs.pop("""target_size""" ) __a = self.model(**__lowercase ) __a = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: __a = model_inputs["""bbox"""] return model_outputs def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ): '''simple docstring''' __a = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. __a , __a = target_size[0].tolist() def unnormalize(__lowercase : Optional[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) __a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) __a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )] __a = ["""score""", """label""", """box"""] __a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase ) __a = raw_annotations[0] __a = raw_annotation["""scores"""] __a = raw_annotation["""labels"""] __a = raw_annotation["""boxes"""] __a = scores.tolist() __a = [self.model.config.idalabel[label.item()] for label in labels] __a = [self._get_bounding_box(__lowercase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __a = ["""score""", """label""", """box"""] __a = [ dict(zip(__lowercase , __lowercase ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) __a , __a , __a , __a = box.int().tolist() __a = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
302
0
from collections.abc import Callable import numpy as np def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = int(np.ceil((x_end - xa) / step_size ) ) lowerCamelCase_ = np.zeros((n + 1,) ) lowerCamelCase_ = ya lowerCamelCase_ = xa for k in range(lowerCamelCase__ ): lowerCamelCase_ = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
19
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
302
0
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowercase : Tuple = {"""UserAgent""": UserAgent().random} def _snake_case( SCREAMING_SNAKE_CASE__ ) -> dict: lowercase : Any = script.contents[0] lowercase : Union[str, Any] = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __snake_case : def __init__( self ,snake_case ): '''simple docstring''' lowercase : List[Any] = f"https://www.instagram.com/{username}/" lowercase : str = self.get_json() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = requests.get(self.url ,headers=snake_case ).text lowercase : Dict = BeautifulSoup(snake_case ,"""html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self ): '''simple docstring''' return f"{self.__class__.__name__}('{self.username}')" def __str__( self ): '''simple docstring''' return f"{self.fullname} ({self.username}) is {self.biography}" @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["username"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["full_name"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["biography"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["business_email"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["external_url"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["is_verified"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.user_data["is_private"] def _snake_case( SCREAMING_SNAKE_CASE__ = "github" ) -> None: import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions lowercase : Tuple = InstagramUser(SCREAMING_SNAKE_CASE__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , SCREAMING_SNAKE_CASE__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowercase : List[str] = InstagramUser("""github""") print(instagram_user) print(F'''{instagram_user.number_of_posts = }''') print(F'''{instagram_user.number_of_followers = }''') print(F'''{instagram_user.number_of_followings = }''') print(F'''{instagram_user.email = }''') print(F'''{instagram_user.website = }''') print(F'''{instagram_user.profile_picture_url = }''') print(F'''{instagram_user.is_verified = }''') print(F'''{instagram_user.is_private = }''')
20
import random def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" __a , __a , __a = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None __a = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )] __a = 0 __a , __a , __a = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
302
0
import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _lowerCamelCase( unittest.TestCase ): def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Optional[Any] = 'laion/clap-htsat-unfused' _lowercase : Optional[Any] = tempfile.mkdtemp() def UpperCamelCase ( self, **lowerCamelCase) -> List[Any]: """simple docstring""" return RobertaTokenizer.from_pretrained(self.checkpoint, **lowerCamelCase) def UpperCamelCase ( self, **lowerCamelCase) -> str: """simple docstring""" return ClapFeatureExtractor.from_pretrained(self.checkpoint, **lowerCamelCase) def UpperCamelCase ( self) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname) def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : str = self.get_tokenizer() _lowercase : Optional[int] = self.get_feature_extractor() _lowercase : int = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase) processor.save_pretrained(self.tmpdirname) _lowercase : Union[str, Any] = ClapProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, lowerCamelCase) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, lowerCamelCase) def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : Optional[int] = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) _lowercase : str = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)') _lowercase : List[str] = self.get_feature_extractor(do_normalize=lowerCamelCase, padding_value=1.0) _lowercase : Tuple = ClapProcessor.from_pretrained( self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase, padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, lowerCamelCase) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Union[str, Any] = self.get_feature_extractor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Dict = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase) _lowercase : Union[str, Any] = floats_list((3, 10_00)) _lowercase : str = feature_extractor(lowerCamelCase, return_tensors='np') _lowercase : Tuple = processor(audios=lowerCamelCase, return_tensors='np') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2) def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Optional[int] = self.get_feature_extractor() _lowercase : Any = self.get_tokenizer() _lowercase : Dict = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase) _lowercase : Any = 'This is a test string' _lowercase : List[Any] = processor(text=lowerCamelCase) _lowercase : Dict = tokenizer(lowerCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : Tuple = self.get_feature_extractor() _lowercase : str = self.get_tokenizer() _lowercase : List[str] = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase) _lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : str = processor.batch_decode(lowerCamelCase) _lowercase : Any = tokenizer.batch_decode(lowerCamelCase) self.assertListEqual(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Dict = self.get_feature_extractor() _lowercase : Any = self.get_tokenizer() _lowercase : Tuple = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase) self.assertListEqual( processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
21
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Optional[int] , **__lowercase : Dict ): '''simple docstring''' super().__init__(**__lowercase ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ): '''simple docstring''' return super().__call__(__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ): '''simple docstring''' __a = {} if "candidate_labels" in kwargs: __a = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __a = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ): '''simple docstring''' if isinstance(__lowercase , __lowercase ): if audio.startswith("""http://""" ) or audio.startswith("""https://""" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png __a = requests.get(__lowercase ).content else: with open(__lowercase , """rb""" ) as f: __a = f.read() if isinstance(__lowercase , __lowercase ): __a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate ) if not isinstance(__lowercase , np.ndarray ): raise ValueError("""We expect a numpy ndarray as input""" ) if len(audio.shape ) != 1: raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" ) __a = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" ) __a = candidate_labels __a = [hypothesis_template.format(__lowercase ) for x in candidate_labels] __a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase ) __a = [text_inputs] return inputs def UpperCamelCase_ ( self : Any , __lowercase : Any ): '''simple docstring''' __a = model_inputs.pop("""candidate_labels""" ) __a = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __lowercase ): __a = text_inputs[0] else: # Batching case. __a = text_inputs[0][0] __a = self.model(**__lowercase , **__lowercase ) __a = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_audio, } return model_outputs def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ): '''simple docstring''' __a = model_outputs.pop("""candidate_labels""" ) __a = model_outputs["""logits"""][0] if self.framework == "pt": __a = logits.softmax(dim=0 ) __a = probs.tolist() else: raise ValueError("""`tf` framework not supported.""" ) __a = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda __lowercase : -x[0] ) ] return result
302
0
'''simple docstring''' import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :int = { '''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''', } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Any = """align_text_model""" def __init__( self : str , snake_case_ : Any=3_0_5_2_2 , snake_case_ : Dict=7_6_8 , snake_case_ : Union[str, Any]=1_2 , snake_case_ : Dict=1_2 , snake_case_ : Optional[int]=3_0_7_2 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Dict=5_1_2 , snake_case_ : Optional[Any]=2 , snake_case_ : int=0.0_2 , snake_case_ : Dict=1e-12 , snake_case_ : List[Any]=0 , snake_case_ : Optional[Any]="absolute" , snake_case_ : Dict=True , **snake_case_ : Dict , ): super().__init__(**snake_case_ ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = pad_token_id @classmethod def lowercase ( cls : Optional[int] , snake_case_ : Union[str, os.PathLike] , **snake_case_ : Optional[int] ): cls._set_token_in_kwargs(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": _UpperCAmelCase = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(snake_case_ , **snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """align_vision_model""" def __init__( self : int , snake_case_ : int = 3 , snake_case_ : int = 6_0_0 , snake_case_ : float = 2.0 , snake_case_ : float = 3.1 , snake_case_ : int = 8 , snake_case_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , snake_case_ : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , snake_case_ : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , snake_case_ : List[int] = [] , snake_case_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , snake_case_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case_ : float = 0.2_5 , snake_case_ : str = "swish" , snake_case_ : int = 2_5_6_0 , snake_case_ : str = "mean" , snake_case_ : float = 0.0_2 , snake_case_ : float = 0.0_0_1 , snake_case_ : float = 0.9_9 , snake_case_ : float = 0.2 , **snake_case_ : Dict , ): super().__init__(**snake_case_ ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(snake_case_ ) * 4 @classmethod def lowercase ( cls : int , snake_case_ : Union[str, os.PathLike] , **snake_case_ : Tuple ): cls._set_token_in_kwargs(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": _UpperCAmelCase = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(snake_case_ , **snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """align""" _lowerCamelCase : Any = True def __init__( self : Tuple , snake_case_ : Dict=None , snake_case_ : Dict=None , snake_case_ : Tuple=6_4_0 , snake_case_ : List[str]=1.0 , snake_case_ : Dict=0.0_2 , **snake_case_ : Optional[int] , ): super().__init__(**snake_case_ ) if text_config is None: _UpperCAmelCase = {} logger.info("text_config is None. Initializing the AlignTextConfig with default values." ) if vision_config is None: _UpperCAmelCase = {} logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." ) _UpperCAmelCase = AlignTextConfig(**snake_case_ ) _UpperCAmelCase = AlignVisionConfig(**snake_case_ ) _UpperCAmelCase = projection_dim _UpperCAmelCase = temperature_init_value _UpperCAmelCase = initializer_range @classmethod def lowercase ( cls : Optional[Any] , snake_case_ : AlignTextConfig , snake_case_ : AlignVisionConfig , **snake_case_ : Union[str, Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ ) def lowercase ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.text_config.to_dict() _UpperCAmelCase = self.vision_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
22
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Dict =['pixel_values'] def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ): '''simple docstring''' super().__init__(**__lowercase ) __a = size if size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase ) __a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" ) __a = do_resize __a = do_rescale __a = do_normalize __a = do_center_crop __a = crop_size __a = size __a = resample __a = rescale_factor __a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "shortest_edge" in size: __a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __a = (size["""height"""], size["""width"""]) else: raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" ) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" ) return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ): '''simple docstring''' return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ): '''simple docstring''' return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ): '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = do_rescale if do_rescale is not None else self.do_rescale __a = do_normalize if do_normalize is not None else self.do_normalize __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase ) __a = resample if resample is not None else self.resample __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = size if size is not None else self.size __a = get_size_dict(__lowercase ) if not is_batched(__lowercase ): __a = [images] if not valid_images(__lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __a = [to_numpy_array(__lowercase ) for image in images] if do_resize: __a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] if do_center_crop: __a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images] if do_rescale: __a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: __a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] __a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __a = {"""pixel_values""": images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
302
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class SCREAMING_SNAKE_CASE: """simple docstring""" def __init__( self : int , __snake_case : Union[str, Any] , __snake_case : Optional[int]=13 , __snake_case : str=7 , __snake_case : int=True , __snake_case : List[Any]=True , __snake_case : Any=False , __snake_case : Union[str, Any]=True , __snake_case : Dict=99 , __snake_case : Optional[int]=32 , __snake_case : Any=5 , __snake_case : Any=4 , __snake_case : List[Any]=37 , __snake_case : Dict="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Optional[int]=16 , __snake_case : Any=2 , __snake_case : Optional[Any]=0.02 , __snake_case : List[str]=3 , __snake_case : str=4 , __snake_case : Optional[int]=None , ) -> Optional[int]: UpperCAmelCase : Dict = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : List[str] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : Any = use_token_type_ids UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : List[str] = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : Optional[int] = max_position_embeddings UpperCAmelCase : Dict = type_vocab_size UpperCAmelCase : Optional[int] = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = num_labels UpperCAmelCase : Tuple = num_choices UpperCAmelCase : List[str] = scope def A ( self : Any ) -> str: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[int] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : Tuple = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : List[Any] = None if self.use_labels: UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[Any] ) -> Optional[int]: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , use_stable_embedding=__snake_case , ) def A ( self : Dict , __snake_case : Any , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Any , __snake_case : int ) -> int: UpperCAmelCase : Optional[Any] = OpenLlamaModel(config=__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case ) UpperCAmelCase : Dict = model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : str , ) -> Tuple: UpperCAmelCase : Optional[int] = True UpperCAmelCase : Optional[Any] = OpenLlamaModel(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Union[str, Any] = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , ) UpperCAmelCase : List[str] = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , ) UpperCAmelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Dict , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , ) -> Optional[int]: UpperCAmelCase : int = OpenLlamaForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Dict = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Optional[int] , ) -> List[Any]: UpperCAmelCase : List[str] = True UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Union[str, Any] = OpenLlamaForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() # first forward pass UpperCAmelCase : Any = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , use_cache=__snake_case , ) UpperCAmelCase : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Tuple = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0] UpperCAmelCase : Union[str, Any] = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0] # select random slice UpperCAmelCase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) ) def A ( self : Dict ) -> Optional[Any]: UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : List[str] = config_and_inputs UpperCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) lowerCamelCase__ = (OpenLlamaForCausalLM,) if is_torch_available() else () lowerCamelCase__ = ( { """feature-extraction""": OpenLlamaModel, """text-classification""": OpenLlamaForSequenceClassification, """text-generation""": OpenLlamaForCausalLM, """zero-shot""": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def A ( self : Union[str, Any] ) -> int: UpperCAmelCase : Optional[int] = OpenLlamaModelTester(self ) UpperCAmelCase : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def A ( self : Dict ) -> Union[str, Any]: self.config_tester.run_common_tests() def A ( self : Dict ) -> str: UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def A ( self : Union[str, Any] ) -> Optional[int]: UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase : Union[str, Any] = type self.model_tester.create_and_check_model(*__snake_case ) def A ( self : Dict ) -> Union[str, Any]: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = 3 UpperCAmelCase : Optional[int] = input_dict['''input_ids'''] UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(__snake_case ) UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase : Any = OpenLlamaForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Any = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : List[str] ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[str] = 3 UpperCAmelCase : Dict = '''single_label_classification''' UpperCAmelCase : int = input_dict['''input_ids'''] UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__snake_case ) UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : List[Any] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : Union[str, Any] ) -> Optional[int]: UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = 3 UpperCAmelCase : List[str] = '''multi_label_classification''' UpperCAmelCase : Union[str, Any] = input_dict['''input_ids'''] UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(__snake_case ) UpperCAmelCase : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase : Any = OpenLlamaForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Tuple = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def A ( self : List[str] ) -> Optional[int]: pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def A ( self : Any , __snake_case : Any ) -> List[Any]: UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : List[str] = OpenLlamaModel(__snake_case ) original_model.to(__snake_case ) original_model.eval() UpperCAmelCase : int = original_model(__snake_case ).last_hidden_state UpperCAmelCase : Optional[int] = original_model(__snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : str = {'''type''': scaling_type, '''factor''': 10.0} UpperCAmelCase : List[str] = OpenLlamaModel(__snake_case ) scaled_model.to(__snake_case ) scaled_model.eval() UpperCAmelCase : List[str] = scaled_model(__snake_case ).last_hidden_state UpperCAmelCase : List[str] = scaled_model(__snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
23
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoTokenizer.from_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = tokenizer("""This is me""" , return_tensors="""pt""" ) __a = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __a = model.generate(**__lowercase ) __a = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __a = model_reloaded.generate(**__lowercase ) self.assertTrue(torch.allclose(__lowercase , __lowercase ) ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowercase ): model.save_pretrained(__lowercase ) __a = model.reverse_bettertransformer() model.save_pretrained(__lowercase )
302
0
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str ) -> bool: __snake_case = len(snake_case_ ) + 1 __snake_case = len(snake_case_ ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. __snake_case = [[0 for i in range(snake_case_ )] for j in range(snake_case_ )] # since string of zero length match pattern of zero length __snake_case = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , snake_case_ ): __snake_case = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , snake_case_ ): __snake_case = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , snake_case_ ): for j in range(1 , snake_case_ ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": __snake_case = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: __snake_case = 1 elif pattern[j - 2] in (input_string[i - 1], "."): __snake_case = dp[i - 1][j] else: __snake_case = 0 else: __snake_case = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") snake_case_ = 'aab' snake_case_ = 'c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(F'{input_string} matches the given pattern {pattern}') else: print(F'{input_string} does not match with the given pattern {pattern}')
24
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] ='albert' def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = embedding_size __a = hidden_size __a = num_hidden_layers __a = num_hidden_groups __a = num_attention_heads __a = inner_group_num __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout_prob __a = position_embedding_type class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
302
0
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" pass @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" @property def __magic_name__ (self ) -> Optional[int]: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ort.SessionOptions() SCREAMING_SNAKE_CASE__ : Union[str, Any] = False return options def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE__ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE__ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE__ : int = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images SCREAMING_SNAKE_CASE__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE__ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE__ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE__ : Dict = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ : str = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Any = output.images SCREAMING_SNAKE_CASE__ : int = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
25
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["DebertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = val __a = None __a = None def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: __a = Node(__lowercase ) else: self.left.insert(__lowercase ) elif val > self.val: if self.right is None: __a = Node(__lowercase ) else: self.right.insert(__lowercase ) else: __a = val def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if root: inorder(root.left , _SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , _SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return arr __a = Node(arr[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. __a = [] inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
302
0
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __lowercase : List[Any] = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __lowercase : Dict = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} __lowercase : int = 'zero2' __lowercase : Dict = 'zero3' __lowercase : List[Any] = [ZEROa, ZEROa] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a : Dict = parameterized.to_safe_name('_'.join(str(_SCREAMING_SNAKE_CASE ) for x in param.args ) ) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test __lowercase : List[Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __UpperCamelCase ( lowerCAmelCase_ ): @parameterized.expand(__a , name_func=__a ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @require_torch_multi_gpu @parameterized.expand(__a , name_func=__a ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @parameterized.expand(__a , name_func=__a ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @require_torch_multi_gpu @parameterized.expand(__a , name_func=__a ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' pass def __UpperCAmelCase ( self , __a , __a , __a = 10 , __a = True , __a = True , __a = True , ): '''simple docstring''' __a : Any = models[model] __a : Union[str, Any] = self.run_trainer( stage=__a , model_name=__a , eval_steps=__a , num_train_epochs=1 , distributed=__a , fpaa=__a , ) self.do_checks(__a ) return output_dir def __UpperCAmelCase ( self , __a , __a , __a = 10 , __a = 1 , __a = True , __a = True , ): '''simple docstring''' __a : Tuple = self.get_auto_remove_tmp_dir('./xxx' , after=__a ) __a : Optional[Any] = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__a )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['--fp16'] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a : int = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() __a : Union[str, Any] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] __a : Dict = self.get_launcher(__a ) __a : Tuple = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__a , env=self.get_env() ) return output_dir def __UpperCAmelCase ( self , __a=False ): '''simple docstring''' __a : int = min(2 , get_gpu_count() ) if distributed else 1 return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
27
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) ) class SCREAMING_SNAKE_CASE : def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = make_divisible(512 * width_multiplier , divisor=8 ) __a = hidden_act __a = conv_kernel_size __a = output_stride __a = classifier_dropout_prob __a = use_labels __a = is_training __a = num_labels __a = initializer_range __a = scope __a = width_multiplier __a = ffn_dropout __a = attn_dropout def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ): '''simple docstring''' __a = MobileViTVaModel(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[Any] =( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __lowerCamelCase : Any =( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCamelCase : Dict =False __lowerCamelCase : Optional[Any] =False __lowerCamelCase : int =False __lowerCamelCase : Any =False def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = MobileViTVaModelTester(self ) __a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def UpperCamelCase_ ( self : int ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__lowercase ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ): __a = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __a = outputs.hidden_states __a = 5 self.assertEqual(len(__lowercase ) , __lowercase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __a = 2 for i in range(len(__lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = MobileViTVaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase__ ( ): """simple docstring""" __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( __lowercase ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowercase ) __a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits # verify the logits __a = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) __a = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits.detach().cpu() __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) __a = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase ) __a = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
302
0
'''simple docstring''' import torch from torch import nn class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=1 , UpperCamelCase__ : int=False ): """simple docstring""" super().__init__() UpperCamelCase = n_token UpperCamelCase = d_embed UpperCamelCase = d_proj UpperCamelCase = cutoffs + [n_token] UpperCamelCase = [0] + self.cutoffs UpperCamelCase = div_val UpperCamelCase = self.cutoffs[0] UpperCamelCase = len(self.cutoffs ) - 1 UpperCamelCase = self.shortlist_size + self.n_clusters if self.n_clusters > 0: UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters ) ) UpperCamelCase = nn.ModuleList() UpperCamelCase = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) ) else: self.out_projs.append(UpperCamelCase__ ) self.out_layers.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) ) else: for i in range(len(self.cutoffs ) ): UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCamelCase = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) ) self.out_layers.append(nn.Linear(UpperCamelCase__ , r_idx - l_idx ) ) UpperCamelCase = keep_order def A ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ): """simple docstring""" if proj is None: UpperCamelCase = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: UpperCamelCase = nn.functional.linear(UpperCamelCase__ , proj.t().contiguous() ) UpperCamelCase = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=False ): """simple docstring""" if labels is not None: # Shift so that tokens < n predict n UpperCamelCase = hidden[..., :-1, :].contiguous() UpperCamelCase = labels[..., 1:].contiguous() UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) ) UpperCamelCase = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('Input and labels should have the same size in the batch dimension.' ) else: UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: UpperCamelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: UpperCamelCase = labels != -1_0_0 UpperCamelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device ) UpperCamelCase = ( -nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 ) else: # construct weights and biases UpperCamelCase , UpperCamelCase = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx] UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx] else: UpperCamelCase = self.out_layers[i].weight UpperCamelCase = self.out_layers[i].bias if i == 0: UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 ) UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(UpperCamelCase__ ) biases.append(UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0] UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 ) if labels is None: UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: UpperCamelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device ) UpperCamelCase = 0 UpperCamelCase = [0] + self.cutoffs for i in range(len(UpperCamelCase__ ) - 1 ): UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1] if labels is not None: UpperCamelCase = (labels >= l_idx) & (labels < r_idx) UpperCamelCase = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue UpperCamelCase = labels.index_select(0 , UpperCamelCase__ ) - l_idx UpperCamelCase = head_logprob.index_select(0 , UpperCamelCase__ ) UpperCamelCase = hidden.index_select(0 , UpperCamelCase__ ) else: UpperCamelCase = hidden if i == 0: if labels is not None: UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: UpperCamelCase = head_logprob[:, : self.cutoffs[0]] else: UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i] UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 ) UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i UpperCamelCase = logprob_i if labels is not None: if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order: out.index_copy_(0 , UpperCamelCase__ , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def A ( self : List[Any] , UpperCamelCase__ : str ): """simple docstring""" if self.n_clusters == 0: UpperCamelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(UpperCamelCase__ , dim=-1 ) else: # construct weights and biases UpperCamelCase , UpperCamelCase = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx] UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx] else: UpperCamelCase = self.out_layers[i].weight UpperCamelCase = self.out_layers[i].bias if i == 0: UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 ) UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(UpperCamelCase__ ) biases.append(UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0] UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) ) UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 ) UpperCamelCase = [0] + self.cutoffs for i in range(len(UpperCamelCase__ ) - 1 ): UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1] if i == 0: UpperCamelCase = head_logprob[:, : self.cutoffs[0]] else: UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i] UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 ) UpperCamelCase = head_logprob[:, -i] + tail_logprob_i UpperCamelCase = logprob_i return out
28
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
302
0
import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __UpperCAmelCase = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def lowercase__ ( __snake_case : str = "dhaka" , __snake_case : int = 5 ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = min(__snake_case , 50 ) # Prevent abuse! UpperCAmelCase_ : Dict = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } UpperCAmelCase_ : Any = requests.get('https://www.google.com/search' , params=__snake_case , headers=__snake_case ) UpperCAmelCase_ : Dict = BeautifulSoup(html.text , 'html.parser' ) UpperCAmelCase_ : Any = ''.join( re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) UpperCAmelCase_ : int = json.dumps(__snake_case ) UpperCAmelCase_ : List[Any] = json.loads(__snake_case ) UpperCAmelCase_ : Union[str, Any] = re.findall( R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , __snake_case , ) if not matched_google_image_data: return 0 UpperCAmelCase_ : Union[str, Any] = re.sub( R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(__snake_case ) , ) UpperCAmelCase_ : Optional[int] = re.findall( R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , __snake_case , ) for index, fixed_full_res_image in enumerate(__snake_case ): if index >= max_images: return index UpperCAmelCase_ : Optional[int] = bytes(__snake_case , 'ascii' ).decode( 'unicode-escape' ) UpperCAmelCase_ : Union[str, Any] = bytes(__snake_case , 'ascii' ).decode( 'unicode-escape' ) UpperCAmelCase_ : Union[str, Any] = urllib.request.build_opener() UpperCAmelCase_ : Dict = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(__snake_case ) UpperCAmelCase_ : Union[str, Any] = F"query_{query.replace(' ' , '_' )}" if not os.path.exists(__snake_case ): os.makedirs(__snake_case ) urllib.request.urlretrieve( # noqa: S310 __snake_case , F"{path_name}/original_size_img_{index}.jpg" ) return index if __name__ == "__main__": try: __UpperCAmelCase = download_images_from_google_query(sys.argv[1]) print(F'{image_count} images were downloaded to disk.') except IndexError: print('Please provide a search term.') raise
29
import string import numpy def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : __lowerCamelCase : List[str] =string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase : List[Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 ) __lowerCamelCase : Optional[Any] =numpy.vectorize(lowerCamelCase__ ) def __init__( self : Union[str, Any] , __lowercase : numpy.ndarray ): '''simple docstring''' __a = self.modulus(__lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __a = encrypt_key.shape[0] def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' return self.key_string.index(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : int ): '''simple docstring''' return self.key_string[round(__lowercase )] def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = len(self.key_string ) if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1: __a = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' __a = [char for char in text.upper() if char in self.key_string] __a = chars[-1] while len(__lowercase ) % self.break_key != 0: chars.append(__lowercase ) return "".join(__lowercase ) def UpperCamelCase_ ( self : List[str] , __lowercase : str ): '''simple docstring''' __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[ 0 ] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __a = i break __a = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowercase ) ) def UpperCamelCase_ ( self : Any , __lowercase : str ): '''simple docstring''' __a = self.make_decrypt_key() __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase__ ( ): """simple docstring""" __a = int(input("""Enter the order of the encryption key: """ ) ) __a = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(_SCREAMING_SNAKE_CASE ): __a = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) __a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) __a = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": __a = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": __a = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
import logging from transformers.configuration_utils import PretrainedConfig __a = logging.getLogger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'masked_bert' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = pruning_method lowercase_ = mask_init lowercase_ = mask_scale
30
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] ='autoformer' __lowerCamelCase : str ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ): '''simple docstring''' # time series specific configuration __a = prediction_length __a = context_length if context_length is not None else prediction_length __a = distribution_output __a = loss __a = input_size __a = num_time_features __a = lags_sequence __a = scaling __a = num_dynamic_real_features __a = num_static_real_features __a = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) __a = cardinality else: __a = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) __a = embedding_dimension else: __a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __a = num_parallel_samples # Transformer architecture configuration __a = input_size * len(self.lags_sequence ) + self._number_of_features __a = d_model __a = encoder_attention_heads __a = decoder_attention_heads __a = encoder_ffn_dim __a = decoder_ffn_dim __a = encoder_layers __a = decoder_layers __a = dropout __a = attention_dropout __a = activation_dropout __a = encoder_layerdrop __a = decoder_layerdrop __a = activation_function __a = init_std __a = use_cache # Autoformer __a = label_length __a = moving_average __a = autocorrelation_factor super().__init__(is_encoder_decoder=__lowercase , **__lowercase ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
302
0
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCamelCase_ (snake_case__ ): '''simple docstring''' def __init__( self : List[Any] ): _UpperCAmelCase : Union[str, Any] = [] def _A ( self : Any , A : Union[str, Any] , A : Optional[int] , A : List[str] , **A : Tuple ): self.events.append("on_init_end" ) def _A ( self : Any , A : str , A : List[Any] , A : List[Any] , **A : Tuple ): self.events.append("on_train_begin" ) def _A ( self : Tuple , A : List[str] , A : Tuple , A : int , **A : List[str] ): self.events.append("on_train_end" ) def _A ( self : Optional[Any] , A : Dict , A : Any , A : Optional[Any] , **A : List[Any] ): self.events.append("on_epoch_begin" ) def _A ( self : Optional[Any] , A : List[Any] , A : List[str] , A : Optional[int] , **A : Optional[int] ): self.events.append("on_epoch_end" ) def _A ( self : List[str] , A : Optional[int] , A : List[Any] , A : Union[str, Any] , **A : Any ): self.events.append("on_step_begin" ) def _A ( self : Tuple , A : Union[str, Any] , A : int , A : Optional[int] , **A : int ): self.events.append("on_step_end" ) def _A ( self : Optional[int] , A : Optional[Any] , A : Union[str, Any] , A : str , **A : Union[str, Any] ): self.events.append("on_evaluate" ) def _A ( self : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] , **A : Dict ): self.events.append("on_predict" ) def _A ( self : Dict , A : Dict , A : List[Any] , A : Dict , **A : str ): self.events.append("on_save" ) def _A ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : Optional[int] , **A : Dict ): self.events.append("on_log" ) def _A ( self : Optional[int] , A : Optional[Any] , A : Tuple , A : Tuple , **A : List[str] ): self.events.append("on_prediction_step" ) @require_torch class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def _A ( self : Optional[int] ): _UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() def _A ( self : List[Any] ): shutil.rmtree(self.output_dir ) def _A ( self : Union[str, Any] , A : Optional[int]=0 , A : Optional[Any]=0 , A : Optional[Any]=64 , A : Dict=64 , A : Any=None , A : Tuple=False , **A : Optional[int] ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. _UpperCAmelCase : str = RegressionDataset(length=A ) _UpperCAmelCase : Union[str, Any] = RegressionDataset(length=A ) _UpperCAmelCase : Any = RegressionModelConfig(a=A , b=A ) _UpperCAmelCase : List[Any] = RegressionPreTrainedModel(A ) _UpperCAmelCase : Dict = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A ) return Trainer( A , A , train_dataset=A , eval_dataset=A , callbacks=A , ) def _A ( self : str , A : List[str] , A : List[str] ): self.assertEqual(len(A ) , len(A ) ) # Order doesn't matter _UpperCAmelCase : Tuple = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) _UpperCAmelCase : Any = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) for cba, cba in zip(A , A ): if isinstance(A , A ) and isinstance(A , A ): self.assertEqual(A , A ) elif isinstance(A , A ) and not isinstance(A , A ): self.assertEqual(A , cba.__class__ ) elif not isinstance(A , A ) and isinstance(A , A ): self.assertEqual(cba.__class__ , A ) else: self.assertEqual(A , A ) def _A ( self : int , A : List[str] ): _UpperCAmelCase : List[str] = ["on_init_end", "on_train_begin"] _UpperCAmelCase : str = 0 _UpperCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() ) _UpperCAmelCase : Optional[int] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(A ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _A ( self : str ): _UpperCAmelCase : Any = self.get_trainer() _UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # Callbacks passed at init are added to the default callbacks _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A ) _UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def _A ( self : Optional[Any] ): _UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _UpperCAmelCase : Dict = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _UpperCAmelCase : Optional[Any] = self.get_trainer() _UpperCAmelCase : Any = trainer.pop_callback(A ) self.assertEqual(cb.__class__ , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # We can also add, pop, or remove by instance _UpperCAmelCase : Union[str, Any] = self.get_trainer() _UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _UpperCAmelCase : List[Any] = self.get_trainer() _UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0] _UpperCAmelCase : Union[str, Any] = trainer.pop_callback(A ) self.assertEqual(A , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def _A ( self : Optional[Any] ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=A ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # Independent log/save/eval _UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # A bit of everything _UpperCAmelCase : int = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: _UpperCAmelCase : Optional[Any] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A ) in warn_mock.call_args[0][0]
31
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[Any] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ ): snake_case__ : Optional[int] = '''bit''' snake_case__ : Optional[Any] = ['''preactivation''', '''bottleneck'''] snake_case__ : Tuple = ['''SAME''', '''VALID'''] def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=6_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , SCREAMING_SNAKE_CASE__ : Optional[Any]=[3, 4, 6, 3] , SCREAMING_SNAKE_CASE__ : Optional[Any]="preactivation" , SCREAMING_SNAKE_CASE__ : Tuple="relu" , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=3_2 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str , ) -> str: super().__init__(**SCREAMING_SNAKE_CASE__ ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: a_ : Any = global_padding.upper() else: raise ValueError(F"""Padding strategy {global_padding} not supported""" ) a_ : Optional[Any] = num_channels a_ : List[Any] = embedding_size a_ : Union[str, Any] = hidden_sizes a_ : List[str] = depths a_ : Any = layer_type a_ : Optional[int] = hidden_act a_ : Tuple = global_padding a_ : List[Any] = num_groups a_ : List[str] = drop_path_rate a_ : List[Any] = embedding_dynamic_padding a_ : int = output_stride a_ : str = width_factor a_ : Dict = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 )] a_ , a_ : List[Any] = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , stage_names=self.stage_names )
32
from __future__ import annotations lowerCamelCase__ = """#""" class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] ): '''simple docstring''' __a = {} def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ): '''simple docstring''' __a = self._trie for char in text: if char not in trie: __a = {} __a = trie[char] __a = True def UpperCamelCase_ ( self : Tuple , __lowercase : str ): '''simple docstring''' __a = self._trie for char in prefix: if char in trie: __a = trie[char] else: return [] return self._elements(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ): '''simple docstring''' __a = [] for c, v in d.items(): __a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )] result.extend(__lowercase ) return tuple(__lowercase ) lowerCamelCase__ = Trie() lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" __a = trie.find_word(_SCREAMING_SNAKE_CASE ) return tuple(string + word for word in suffixes ) def lowerCAmelCase__ ( ): """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __A : List[str] = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class _UpperCAmelCase : SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : Optional[str] = None SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = None SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = None SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = None def A ( self : Optional[int] ) -> Union[str, Any]: lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = _str_to_version_tuple(self.version_str ) def __repr__( self : int ) -> List[Any]: return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def A ( self : Any ) -> Union[str, Any]: return self.major, self.minor, self.patch def A ( self : Dict , A : List[Any] ) -> Tuple: if isinstance(A , A ): return Version(A ) elif isinstance(A , A ): return other raise TypeError(F'''{other} (type {type(A )}) cannot be compared to version.''' ) def __eq__( self : Union[str, Any] , A : str ) -> List[str]: try: lowercase_ : Optional[int] = self._validate_operand(A ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : int , A : str ) -> Union[str, Any]: lowercase_ : Optional[Any] = self._validate_operand(A ) return self.tuple < other.tuple def __hash__( self : Dict ) -> Optional[Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def A ( cls : int , A : List[Any] ) -> List[str]: lowercase_ : Dict = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def A ( self : Optional[int] ) -> str: return self.version_str def lowercase ( __snake_case : Any ): lowercase_ : int = _VERSION_REG.match(__snake_case ) if not res: raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(__snake_case ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def lowercase ( __snake_case : int ): return ".".join(str(__snake_case ) for v in version_tuple )
33
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = num_attention_heads __a = attention_head_dim __a = num_attention_heads * attention_head_dim __a = additional_embeddings __a = time_embed_dim or inner_dim __a = embedding_proj_dim or embedding_dim __a = clip_embed_dim or embedding_dim __a = Timesteps(__lowercase , __lowercase , 0 ) __a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) if embedding_proj_norm_type is None: __a = None elif embedding_proj_norm_type == "layer": __a = nn.LayerNorm(__lowercase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) __a = nn.Linear(__lowercase , __lowercase ) if encoder_hid_proj_type is None: __a = None elif encoder_hid_proj_type == "linear": __a = nn.Linear(__lowercase , __lowercase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) __a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) ) if added_emb_type == "prd": __a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) ) elif added_emb_type is None: __a = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) __a = nn.ModuleList( [ BasicTransformerBlock( __lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , ) for d in range(__lowercase ) ] ) if norm_in_type == "layer": __a = nn.LayerNorm(__lowercase ) elif norm_in_type is None: __a = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) __a = nn.LayerNorm(__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) __a = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) __a = causal_attention_mask[None, ...] self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = {} def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ): if hasattr(__lowercase , """set_processor""" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowercase , __lowercase , __lowercase ) return processors def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __a = len(self.attn_processors.keys() ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ): if hasattr(__lowercase , """set_processor""" ): if not isinstance(__lowercase , __lowercase ): module.set_processor(__lowercase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ): '''simple docstring''' __a = hidden_states.shape[0] __a = timestep if not torch.is_tensor(__lowercase ): __a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0: __a = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device ) __a = self.time_proj(__lowercase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. __a = timesteps_projected.to(dtype=self.dtype ) __a = self.time_embedding(__lowercase ) if self.embedding_proj_norm is not None: __a = self.embedding_proj_norm(__lowercase ) __a = self.embedding_proj(__lowercase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __a = self.encoder_hidden_states_proj(__lowercase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) __a = self.proj_in(__lowercase ) __a = self.positional_embedding.to(hidden_states.dtype ) __a = [] __a = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowercase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __a = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __a = hidden_states[:, None, :] __a = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 ) additional_embeds.append(__lowercase ) __a = torch.cat( __lowercase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __a = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __a = F.pad( __lowercase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __a = hidden_states + positional_embeddings if attention_mask is not None: __a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 __a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 ) __a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __a = self.norm_in(__lowercase ) for block in self.transformer_blocks: __a = block(__lowercase , attention_mask=__lowercase ) __a = self.norm_out(__lowercase ) if self.prd_embedding is not None: __a = hidden_states[:, -1] else: __a = hidden_states[:, additional_embeddings_len:] __a = self.proj_to_clip_embeddings(__lowercase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple ): '''simple docstring''' __a = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
302
0
'''simple docstring''' import numpy as np from transformers import Pipeline def snake_case_ (_a : str ): UpperCAmelCase = np.max(_a , axis=-1 , keepdims=_a ) UpperCAmelCase = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_a ) class _a ( __a ): def A ( self : Tuple , **lowercase : Any ): '''simple docstring''' UpperCAmelCase = {} if "second_text" in kwargs: UpperCAmelCase = kwargs['''second_text'''] return preprocess_kwargs, {}, {} def A ( self : Union[str, Any] , lowercase : Any , lowercase : Dict=None ): '''simple docstring''' return self.tokenizer(lowercase , text_pair=lowercase , return_tensors=self.framework ) def A ( self : str , lowercase : List[Any] ): '''simple docstring''' return self.model(**lowercase ) def A ( self : Union[str, Any] , lowercase : Tuple ): '''simple docstring''' UpperCAmelCase = model_outputs.logits[0].numpy() UpperCAmelCase = softmax(lowercase ) UpperCAmelCase = np.argmax(lowercase ) UpperCAmelCase = self.model.config.idalabel[best_class] UpperCAmelCase = probabilities[best_class].item() UpperCAmelCase = logits.tolist() return {"label": label, "score": score, "logits": logits}
34
from functools import lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 __a = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_SCREAMING_SNAKE_CASE ) if n > 1: factors.add(_SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ): """simple docstring""" return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 while True: # Increment each value of a generated range __a = [base + i for i in range(_SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. __a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group] checker.append(_SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(_SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ): """simple docstring""" __a = run(_SCREAMING_SNAKE_CASE ) return results[0] if len(_SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
302
0
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __a = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(PATH_TO_TRANSFORMERS) __a = transformers.models.auto.configuration_auto.CONFIG_MAPPING __a = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": True, "LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: snake_case__ : List[str] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"config.{attribute}" in modeling_source or f"getattr(config, \"{attribute}\"" in modeling_source or f"getattr(self.config, \"{attribute}\"" in modeling_source ): snake_case__ : str = True # Deal with multi-line cases elif ( re.search( rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _lowerCAmelCase , ) is not None ): snake_case__ : Optional[Any] = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: snake_case__ : Union[str, Any] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files snake_case__ : List[Any] = [ """bos_index""", """eos_index""", """pad_index""", """unk_index""", """mask_index""", """image_size""", """use_cache""", """out_features""", """out_indices""", ] snake_case__ : Union[str, Any] = ["""encoder_no_repeat_ngram_size"""] # Special cases to be allowed snake_case__ : Union[str, Any] = True if not attribute_used: snake_case__ : Any = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: snake_case__ : Dict = True elif attribute in ["tie_word_embeddings"] and default_value is False: snake_case__ : Any = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: snake_case__ : Union[str, Any] = True elif attribute.endswith("""_token_id""" ): snake_case__ : Union[str, Any] = True # configuration class specific cases if not case_allowed: snake_case__ : str = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) snake_case__ : Tuple = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def __snake_case( _lowerCAmelCase ) -> str: snake_case__ : List[str] = dict(inspect.signature(config_class.__init__ ).parameters ) snake_case__ : Tuple = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]] snake_case__ : List[str] = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass snake_case__ : str = {} if len(config_class.attribute_map ) > 0: snake_case__ : Dict = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files snake_case__ : int = inspect.getsourcefile(_lowerCAmelCase ) snake_case__ : int = os.path.dirname(_lowerCAmelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. snake_case__ : List[str] = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for fn in os.listdir(_lowerCAmelCase ) if fn.startswith("""modeling_""" )] # Get the source code strings snake_case__ : Dict = [] for path in modeling_paths: if os.path.isfile(_lowerCAmelCase ): with open(_lowerCAmelCase ) as fp: modeling_sources.append(fp.read() ) snake_case__ : List[str] = [] for config_param, default_value in zip(_lowerCAmelCase , _lowerCAmelCase ): # `attributes` here is all the variant names for `config_param` snake_case__ : Tuple = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): unused_attributes.append(attributes[0] ) return sorted(_lowerCAmelCase ) def __snake_case( ) -> List[str]: snake_case__ : str = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) snake_case__ : List[Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _lowerCAmelCase : inspect.isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ) and inspect.getmodule(_lowerCAmelCase ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: snake_case__ : Union[str, Any] = check_config_attributes_being_used(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: snake_case__ : Union[str, Any] = unused_attributes if len(_lowerCAmelCase ) > 0: snake_case__ : str = """The following configuration classes contain unused attributes in the corresponding modeling files:\n""" for name, attributes in configs_with_unused_attributes.items(): error += f"{name}: {attributes}\n" raise ValueError(_lowerCAmelCase ) if __name__ == "__main__": check_config_attributes()
35
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" __a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __a = 128 elif "12-12" in model_name: __a = 12 __a = 12 elif "14-14" in model_name: __a = 14 __a = 14 elif "16-16" in model_name: __a = 16 __a = 16 else: raise ValueError("""Model not supported""" ) __a = """huggingface/label-files""" if "speech-commands" in model_name: __a = 35 __a = """speech-commands-v2-id2label.json""" else: __a = 527 __a = """audioset-id2label.json""" __a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) __a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if "module.v" in name: __a = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: __a = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: __a = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: __a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __a = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: __a = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __a = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __a = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __a = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __a = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __a = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: __a = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): __a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: __a = key.split(""".""" ) __a = int(key_split[3] ) __a = config.hidden_size if "weight" in key: __a = val[:dim, :] __a = val[dim : dim * 2, :] __a = val[-dim:, :] else: __a = val[:dim] __a = val[dim : dim * 2] __a = val[-dim:] else: __a = val return orig_state_dict def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ): """simple docstring""" __a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE ) __a = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __a = model_name_to_url[model_name] __a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # remove some keys remove_keys(_SCREAMING_SNAKE_CASE ) # rename some keys __a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load 🤗 model __a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978 __a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526 __a = 1024 if """speech-commands""" not in model_name else 128 __a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) if "speech-commands" in model_name: __a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) __a = dataset[0]["""audio"""]["""array"""] else: __a = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) __a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE ) __a = waveform.squeeze().numpy() __a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" ) # forward pass __a = model(**_SCREAMING_SNAKE_CASE ) __a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __a = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __a = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __a = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __a = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __a = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __a = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __a = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __a = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f"MIT/{model_name}" ) feature_extractor.push_to_hub(f"MIT/{model_name}" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
302
0
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _snake_case = { "configuration_trajectory_transformer": [ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
36
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } lowerCamelCase__ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } lowerCamelCase__ = """▁""" class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] =VOCAB_FILES_NAMES __lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any =AlbertTokenizer def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. __a = ( AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token ) super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , ) __a = do_lower_case __a = remove_space __a = keep_accents __a = vocab_file __a = False if not self.vocab_file else True def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __a = os.path.join( __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
302
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Any = ProphetNetTokenizer __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> str: super().setUp() lowerCAmelCase__ : str = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : List[str] = """UNwant\u00E9d,running""" lowerCAmelCase__ : int = """unwanted, running""" return input_text, output_text def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(__UpperCAmelCase ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) ,[9, 6, 7, 12, 10, 11] ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Union[str, Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Dict = BasicTokenizer(do_lower_case=__UpperCAmelCase ,strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = BasicTokenizer(do_lower_case=__UpperCAmelCase ,strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCAmelCase_ ( self ) -> List[str]: lowerCAmelCase__ : Optional[int] = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : Optional[Any] = BasicTokenizer(do_lower_case=__UpperCAmelCase ,strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = BasicTokenizer(do_lower_case=__UpperCAmelCase ,strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ : Optional[int] = BasicTokenizer(do_lower_case=__UpperCAmelCase ,never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowerCAmelCase__ : List[Any] = {} for i, token in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : int = i lowerCAmelCase__ : Dict = WordpieceTokenizer(vocab=__UpperCAmelCase ,unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) ,[] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] ) @require_torch def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) lowerCAmelCase__ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCAmelCase__ : Optional[int] = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102] lowerCAmelCase__ : Any = tokenizer(__UpperCAmelCase ,padding=__UpperCAmelCase ,return_tensors="""pt""" ) self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) def UpperCAmelCase_ ( self ) -> int: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCAmelCase_ ( self ) -> List[Any]: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) lowerCAmelCase__ : Any = tokenizer.encode("""sequence builders""" ,add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ,__UpperCAmelCase ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
37
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] =(IPNDMScheduler,) __lowerCamelCase : int =(('num_inference_steps', 50),) def UpperCamelCase_ ( self : str , **__lowercase : Dict ): '''simple docstring''' __a = {"""num_train_timesteps""": 1000} config.update(**__lowercase ) return config def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : str ): '''simple docstring''' pass def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals (must be after setting timesteps) __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residual (must be after setting timesteps) __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__lowercase ) for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample return sample def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) __a = self.dummy_sample __a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ): scheduler.set_timesteps(__lowercase ) elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ): __a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __a = dummy_past_residuals[:] __a = scheduler.timesteps[5] __a = scheduler.timesteps[6] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' __a = self.full_loop() __a = torch.mean(torch.abs(__lowercase ) ) assert abs(result_mean.item() - 2540529 ) < 10
302
0
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCAmelCase_ : Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> Union[str, Any]: """simple docstring""" UpperCamelCase :Union[str, Any] = R"""\w+[.]\d+""" UpperCamelCase :int = re.findall(__magic_name__ , __magic_name__ ) for pat in pats: UpperCamelCase :List[str] = key.replace(__magic_name__ , """_""".join(pat.split(""".""" ) ) ) return key def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" UpperCamelCase :Optional[Any] = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): UpperCamelCase :List[str] = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: UpperCamelCase :Optional[Any] = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: UpperCamelCase :Optional[Any] = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer UpperCamelCase :Dict = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: UpperCamelCase :Any = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCamelCase :Dict = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": UpperCamelCase :Optional[Any] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCamelCase :str = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCamelCase :Optional[int] = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=42 ) -> Union[str, Any]: """simple docstring""" UpperCamelCase :Any = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params UpperCamelCase :Tuple = flax_model.init_weights(PRNGKey(__magic_name__ ) ) UpperCamelCase :Any = flatten_dict(__magic_name__ ) UpperCamelCase :Optional[int] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCamelCase :Union[str, Any] = rename_key(__magic_name__ ) UpperCamelCase :Optional[Any] = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters UpperCamelCase , UpperCamelCase :Tuple = rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # also add unexpected weight so that warning is thrown UpperCamelCase :List[Any] = jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
38
from __future__ import annotations lowerCamelCase__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ): '''simple docstring''' __a = graph # mapping node to its parent in resulting breadth first tree __a = {} __a = source_vertex def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = {self.source_vertex} __a = None __a = [self.source_vertex] # first in first out queue while queue: __a = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__lowercase ) __a = vertex queue.append(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex __a = self.parent.get(__lowercase ) if target_vertex_parent is None: __a = ( F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(__lowercase ) return self.shortest_path(__lowercase ) + F"->{target_vertex}" if __name__ == "__main__": lowerCamelCase__ = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
302
0
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel _a = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 48000, '''sample_size''': 65536, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 48000, '''sample_size''': 65536, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 48000, '''sample_size''': 131072, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 16000, '''sample_size''': 65536, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 16000, '''sample_size''': 65536, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 16000, '''sample_size''': 65536, }, } def __A ( __lowerCAmelCase , __lowerCAmelCase )-> int: """simple docstring""" return torch.atana(__lowerCAmelCase , __lowerCAmelCase ) / math.pi * 2 def __A ( __lowerCAmelCase )-> int: """simple docstring""" _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(__lowerCAmelCase , __lowerCAmelCase ) class __lowerCamelCase ( snake_case__): """simple docstring""" pass class __lowerCamelCase ( nn.Module): """simple docstring""" def __init__( self , UpperCAmelCase ): """simple docstring""" super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(UpperCAmelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase ) def __A ( __lowerCAmelCase )-> Any: """simple docstring""" _UpperCAmelCase = MODELS_MAP[model_name]['url'] os.system(F"""wget {url} ./""" ) return F"""./{model_name}.ckpt""" _a = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } _a = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } _a = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } _a = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } _a = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } _a = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def __A ( __lowerCAmelCase )-> Any: """simple docstring""" if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' ): raise ValueError(F"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __A ( __lowerCAmelCase )-> List[Any]: """simple docstring""" for key, value in ATTN_MAP.items(): if name.startswith(__lowerCAmelCase ) and not isinstance(__lowerCAmelCase , __lowerCAmelCase ): return name.replace(__lowerCAmelCase , __lowerCAmelCase ) elif name.startswith(__lowerCAmelCase ): return [name.replace(__lowerCAmelCase , __lowerCAmelCase ) for v in value] raise ValueError(F"""Attn error with {name}""" ) def __A ( __lowerCAmelCase , __lowerCAmelCase=13 )-> List[str]: """simple docstring""" _UpperCAmelCase = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) _UpperCAmelCase = 0 if string.startswith('net.3.' ): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith('net.' ): _UpperCAmelCase = string[4:] while string.startswith('main.7.' ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith('main.' ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = 'mid_block' elif depth > 0 and int(__lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F"""down_blocks.{depth}""" elif depth > 0 and int(__lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F"""up_blocks.{max_depth - 1}""" if int(__lowerCAmelCase ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(__lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(__lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = prefix + '.' + new_layer + '.' + string_left else: _UpperCAmelCase = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def __A ( __lowerCAmelCase )-> List[Any]: """simple docstring""" _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(__lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]: """simple docstring""" if len(__lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __A ( __lowerCAmelCase )-> Tuple: """simple docstring""" _UpperCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _UpperCAmelCase = args.model_path.split('/' )[-1].split('.' )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" _UpperCAmelCase = download(__lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]['sample_rate'] _UpperCAmelCase = MODELS_MAP[model_name]['sample_size'] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=__lowerCAmelCase , sample_rate=__lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(__lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=__lowerCAmelCase )['state_dict'] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(__lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(__lowerCAmelCase ) == 0, F"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(__lowerCAmelCase ) ), F"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}""" if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(__lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=__lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=__lowerCAmelCase ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=__lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(__lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=__lowerCAmelCase , generator=__lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , __lowerCAmelCase ) print('Diff max' , __lowerCAmelCase ) assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/""" print(F"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') _a = parser.parse_args() main(args)
39
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple =KandinskyVaaPriorPipeline __lowerCamelCase : Union[str, Any] =['prompt'] __lowerCamelCase : Any =['prompt', 'negative_prompt'] __lowerCamelCase : List[str] =[ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] __lowerCamelCase : List[Any] =False @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : Any ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return 100 @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__lowercase ) @property def UpperCamelCase_ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) __a = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } __a = PriorTransformer(**__lowercase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __a = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __a = CLIPVisionModelWithProjection(__lowercase ) return model @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = CLIPImageProcessor( crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , ) return image_processor def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.dummy_prior __a = self.dummy_image_encoder __a = self.dummy_text_encoder __a = self.dummy_tokenizer __a = self.dummy_image_processor __a = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , ) __a = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ): '''simple docstring''' if str(__lowercase ).startswith("""mps""" ): __a = torch.manual_seed(__lowercase ) else: __a = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __a = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = """cpu""" __a = self.get_dummy_components() __a = self.pipeline_class(**__lowercase ) __a = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __a = pipe(**self.get_dummy_inputs(__lowercase ) ) __a = output.image_embeds __a = pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] __a = image[0, -10:] __a = image_from_tuple[0, -10:] assert image.shape == (1, 32) __a = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = torch_device == """cpu""" __a = True __a = False self._test_inference_batch_single_identical( test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , ) @skip_mps def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = torch_device == """cpu""" __a = False self._test_attention_slicing_forward_pass( test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
302
0
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowercase ( A_=32 , A_=10 , A_=100 , A_=1_026 , A_=True , A_="data/tokenized_stories_train_wikitext103.jbl" , A_="igf_context_pairs.jbl" , )-> List[str]: '''simple docstring''' set_seed(3 ) # generate train_data and objective_set a , a : Dict = generate_datasets( A_ , A_ , number=A_ , min_len=1_026 , trim=A_ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? a : List[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model a : Union[str, Any] = load_gpta("gpt2" ).to(A_ ) print("computing perplexity on objective set" ) a : int = compute_perplexity(A_ , A_ , A_ ).item() print("perplexity on objective set:" , A_ ) # collect igf pairs and save to file demo.jbl collect_objective_set(A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowercase ( A_ , A_=15 , A_=128 , A_=100 , A_="igf_model.pt" , )-> Any: '''simple docstring''' set_seed(42 ) # Load pre-trained model a : int = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model a : int = SecondaryLearner(A_ ) # Train secondary learner a : Optional[Any] = train_secondary_learner( A_ , A_ , max_epochs=A_ , batch_size=A_ , eval_freq=100 , igf_model_path=A_ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowercase ( A_ , A_ , A_ , A_=32 , A_=1_000 , A_=16 , A_=1.0 , A_=recopy_gpta , A_=None , A_=10 , A_="gpt2_finetuned.pt" , )-> Union[str, Any]: '''simple docstring''' a : Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) a : List[str] = RandomSampler(A_ ) a : Tuple = DataLoader(A_ , sampler=A_ ) a : Union[str, Any] = max_steps // (len(A_ )) + 1 a : List[Any] = 0 a : str = torch.zeros((1, context_len) , dtype=torch.long , device=A_ ) a , a , a : Dict = recopy_model(A_ , A_ , A_ ) model.train() if secondary_learner is not None: secondary_learner.to(A_ ) secondary_learner.eval() a : Tuple = [] a : List[Any] = 0 a : str = [] a : Optional[Any] = [] # Compute the performance of the transformer model at the beginning a : Union[str, Any] = compute_perplexity(A_ , A_ , A_ ) test_perps.append(A_ ) print("Test perplexity, step" , A_ , ":" , A_ ) for epoch in range(int(A_ ) ): for step, example in enumerate(A_ ): torch.cuda.empty_cache() a : str = random.randint(0 , example.size(2 ) - context_len - 1 ) a : List[Any] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() a : List[str] = model(A_ , labels=A_ ) a : Optional[Any] = True if secondary_learner is not None: a : Tuple = secondary_learner.forward( torch.tensor(A_ , dtype=torch.long , device=A_ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(A_ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: a : List[Any] = -1 if predicted_q < threshold: a : Tuple = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) a : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() a : Dict = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: a : Dict = compute_perplexity(A_ , A_ , A_ ) test_perps.append(A_ ) print("Test perplexity, step" , A_ , ":" , A_ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , A_ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowercase ( )-> Optional[int]: '''simple docstring''' a : Any = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=A_ , default=A_ , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=A_ , default=A_ , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=A_ , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=A_ , default=A_ , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=A_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=100 , type=A_ , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=100 , type=A_ , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1_000 , type=A_ , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=128 , type=A_ , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=A_ , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=A_ , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=100 , type=A_ , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1_026 , type=A_ , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=A_ , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=A_ , type=A_ , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=A_ , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=A_ , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=A_ , type=A_ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=A_ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner a : Union[str, Any] = joblib.load("data/IGF_values.jbl" ) # Train secondary learner a : Optional[Any] = training_secondary_learner( A_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model a : List[str] = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model a , a : str = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1_026 , trim=A_ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( A_ , A_ , A_ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=A_ , secondary_learner=A_ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
40
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = Dict[str, Any] lowerCamelCase__ = List[Prediction] @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ): '''simple docstring''' super().__init__(*__lowercase , **__lowercase ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ): '''simple docstring''' __a = {} if "threshold" in kwargs: __a = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ): '''simple docstring''' return super().__call__(*__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : Tuple ): '''simple docstring''' __a = load_image(__lowercase ) __a = torch.IntTensor([[image.height, image.width]] ) __a = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: __a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) __a = target_size return inputs def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ): '''simple docstring''' __a = model_inputs.pop("""target_size""" ) __a = self.model(**__lowercase ) __a = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: __a = model_inputs["""bbox"""] return model_outputs def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ): '''simple docstring''' __a = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. __a , __a = target_size[0].tolist() def unnormalize(__lowercase : Optional[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) __a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) __a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )] __a = ["""score""", """label""", """box"""] __a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase ) __a = raw_annotations[0] __a = raw_annotation["""scores"""] __a = raw_annotation["""labels"""] __a = raw_annotation["""boxes"""] __a = scores.tolist() __a = [self.model.config.idalabel[label.item()] for label in labels] __a = [self._get_bounding_box(__lowercase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __a = ["""score""", """label""", """box"""] __a = [ dict(zip(__lowercase , __lowercase ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) __a , __a , __a , __a = box.int().tolist() __a = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
302
0
'''simple docstring''' import unittest import numpy as np def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , ) -> np.ndarray: lowerCamelCase__ : Tuple = np.shape(UpperCamelCase ) lowerCamelCase__ : Optional[int] = np.shape(UpperCamelCase ) lowerCamelCase__ : List[Any] = np.shape(UpperCamelCase ) if shape_a[0] != shape_b[0]: lowerCamelCase__ : List[str] = ( """Expected the same number of rows for A and B. """ f'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCamelCase ) if shape_b[1] != shape_c[1]: lowerCamelCase__ : Tuple = ( """Expected the same number of columns for B and C. """ f'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = pseudo_inv if a_inv is None: try: lowerCamelCase__ : str = np.linalg.inv(UpperCamelCase ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCamelCase__ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCamelCase__ : Optional[Any] = np.array([[2, 1], [6, 3]] ) lowerCamelCase__ : Dict = schur_complement(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Any = np.block([[a, b], [b.T, c]] ) lowerCamelCase__ : List[Any] = np.linalg.det(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = np.linalg.det(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = np.linalg.det(UpperCamelCase__ ) self.assertAlmostEqual(UpperCamelCase__ , det_a * det_s ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCamelCase__ : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCamelCase__ : str = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCamelCase__ ): schur_complement(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCamelCase__ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCamelCase__ : List[str] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCamelCase__ ): schur_complement(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
41
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
302
0
'''simple docstring''' import math def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> float: if ( not isinstance(__A , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('power_factor must be a valid float value between -1 and 1.' ) return apparent_power * power_factor def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> float: if ( not isinstance(__A , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('power_factor must be a valid float value between -1 and 1.' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
42
import random def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" __a , __a , __a = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None __a = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )] __a = 0 __a , __a , __a = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
302
0
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants __lowercase = Mapping[str, np.ndarray] __lowercase = Mapping[str, Any] # Is a nested dict. __lowercase = 0.0_1 @dataclasses.dataclass(frozen=UpperCAmelCase_ ) class lowerCamelCase_ : '''simple docstring''' a__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. a__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. a__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. a__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. a__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions a__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files a__ : Optional[str] = None # Templates used to generate this protein (prediction-only) a__ : Optional[Sequence[str]] = None # Chain corresponding to each parent a__ : Optional[Sequence[int]] = None def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Optional[int] = R'''(\[[A-Z]+\]\n)''' __UpperCamelCase :List[str] = [tag.strip() for tag in re.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0] __UpperCamelCase :Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) __UpperCamelCase :List[str] = ["N", "CA", "C"] __UpperCamelCase :int = None __UpperCamelCase :Optional[int] = None __UpperCamelCase :Dict = None for g in groups: if "[PRIMARY]" == g[0]: __UpperCamelCase :Any = g[1][0].strip() for i in range(len(SCREAMING_SNAKE_CASE ) ): if seq[i] not in residue_constants.restypes: __UpperCamelCase :List[Any] = '''X''' # FIXME: strings are immutable __UpperCamelCase :Dict = np.array( [residue_constants.restype_order.get(SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __UpperCamelCase :List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) ) __UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(SCREAMING_SNAKE_CASE ): __UpperCamelCase :Union[str, Any] = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __UpperCamelCase :List[str] = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) __UpperCamelCase :Union[str, Any] = np.zeros( ( len(SCREAMING_SNAKE_CASE ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(SCREAMING_SNAKE_CASE ): __UpperCamelCase :List[Any] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=SCREAMING_SNAKE_CASE , atom_mask=SCREAMING_SNAKE_CASE , aatype=SCREAMING_SNAKE_CASE , residue_index=np.arange(len(SCREAMING_SNAKE_CASE ) ) , b_factors=SCREAMING_SNAKE_CASE , ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 ): '''simple docstring''' __UpperCamelCase :List[str] = [] __UpperCamelCase :Optional[int] = prot.remark if remark is not None: pdb_headers.append(f"""REMARK {remark}""" ) __UpperCamelCase :Optional[int] = prot.parents __UpperCamelCase :Union[str, Any] = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __UpperCamelCase :List[Any] = [p for i, p in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if i == chain_id] if parents is None or len(SCREAMING_SNAKE_CASE ) == 0: __UpperCamelCase :Tuple = ['''N/A'''] pdb_headers.append(f"""PARENT {' '.join(SCREAMING_SNAKE_CASE )}""" ) return pdb_headers def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[str] = [] __UpperCamelCase :Dict = pdb_str.split('''\n''' ) __UpperCamelCase :int = prot.remark if remark is not None: out_pdb_lines.append(f"""REMARK {remark}""" ) __UpperCamelCase :List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: __UpperCamelCase :Dict = [] if prot.parents_chain_index is not None: __UpperCamelCase :Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(SCREAMING_SNAKE_CASE ) , [] ) parent_dict[str(SCREAMING_SNAKE_CASE )].append(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Union[str, Any] = max([int(SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __UpperCamelCase :Union[str, Any] = parent_dict.get(str(SCREAMING_SNAKE_CASE ) , ['''N/A'''] ) parents_per_chain.append(SCREAMING_SNAKE_CASE ) else: parents_per_chain.append(list(prot.parents ) ) else: __UpperCamelCase :List[Any] = [['''N/A''']] def make_parent_line(SCREAMING_SNAKE_CASE ) -> str: return f"""PARENT {' '.join(SCREAMING_SNAKE_CASE )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __UpperCamelCase :Optional[Any] = 0 for i, l in enumerate(SCREAMING_SNAKE_CASE ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(SCREAMING_SNAKE_CASE ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(SCREAMING_SNAKE_CASE ): __UpperCamelCase :List[str] = parents_per_chain[chain_counter] else: __UpperCamelCase :Optional[int] = ['''N/A'''] out_pdb_lines.append(make_parent_line(SCREAMING_SNAKE_CASE ) ) return "\n".join(SCREAMING_SNAKE_CASE ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = residue_constants.restypes + ['''X'''] def res_atoa(SCREAMING_SNAKE_CASE ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) __UpperCamelCase :Optional[int] = residue_constants.atom_types __UpperCamelCase :List[str] = [] __UpperCamelCase :List[Any] = prot.atom_mask __UpperCamelCase :List[str] = prot.aatype __UpperCamelCase :Union[str, Any] = prot.atom_positions __UpperCamelCase :Any = prot.residue_index.astype(np.intaa ) __UpperCamelCase :List[str] = prot.b_factors __UpperCamelCase :List[str] = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) __UpperCamelCase :int = get_pdb_headers(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: pdb_lines.extend(SCREAMING_SNAKE_CASE ) __UpperCamelCase :str = aatype.shape[0] __UpperCamelCase :Optional[Any] = 1 __UpperCamelCase :List[Any] = 0 __UpperCamelCase :Any = string.ascii_uppercase __UpperCamelCase :Any = None # Add all atom sites. for i in range(SCREAMING_SNAKE_CASE ): __UpperCamelCase :List[Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __UpperCamelCase :str = '''ATOM''' __UpperCamelCase :Tuple = atom_name if len(SCREAMING_SNAKE_CASE ) == 4 else f""" {atom_name}""" __UpperCamelCase :Optional[Any] = '''''' __UpperCamelCase :int = '''''' __UpperCamelCase :int = 1.00 __UpperCamelCase :int = atom_name[0] # Protein supports only C, N, O, S, this works. __UpperCamelCase :int = '''''' __UpperCamelCase :Dict = '''A''' if chain_index is not None: __UpperCamelCase :Union[str, Any] = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! __UpperCamelCase :str = ( f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" f"""{res_name_a:>3} {chain_tag:>1}""" f"""{residue_index[i]:>4}{insertion_code:>1} """ f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" f"""{occupancy:>6.2f}{b_factor:>6.2f} """ f"""{element:>2}{charge:>2}""" ) pdb_lines.append(SCREAMING_SNAKE_CASE ) atom_index += 1 __UpperCamelCase :Dict = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __UpperCamelCase :int = True __UpperCamelCase :List[str] = chain_index[i + 1] if should_terminate: # Close the chain. __UpperCamelCase :Optional[Any] = '''TER''' __UpperCamelCase :Optional[int] = ( f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(SCREAMING_SNAKE_CASE ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(SCREAMING_SNAKE_CASE ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ): '''simple docstring''' return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=SCREAMING_SNAKE_CASE , remark=SCREAMING_SNAKE_CASE , parents=SCREAMING_SNAKE_CASE , parents_chain_index=SCREAMING_SNAKE_CASE , )
43
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Optional[int] , **__lowercase : Dict ): '''simple docstring''' super().__init__(**__lowercase ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ): '''simple docstring''' return super().__call__(__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ): '''simple docstring''' __a = {} if "candidate_labels" in kwargs: __a = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __a = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ): '''simple docstring''' if isinstance(__lowercase , __lowercase ): if audio.startswith("""http://""" ) or audio.startswith("""https://""" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png __a = requests.get(__lowercase ).content else: with open(__lowercase , """rb""" ) as f: __a = f.read() if isinstance(__lowercase , __lowercase ): __a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate ) if not isinstance(__lowercase , np.ndarray ): raise ValueError("""We expect a numpy ndarray as input""" ) if len(audio.shape ) != 1: raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" ) __a = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" ) __a = candidate_labels __a = [hypothesis_template.format(__lowercase ) for x in candidate_labels] __a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase ) __a = [text_inputs] return inputs def UpperCamelCase_ ( self : Any , __lowercase : Any ): '''simple docstring''' __a = model_inputs.pop("""candidate_labels""" ) __a = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __lowercase ): __a = text_inputs[0] else: # Batching case. __a = text_inputs[0][0] __a = self.model(**__lowercase , **__lowercase ) __a = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_audio, } return model_outputs def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ): '''simple docstring''' __a = model_outputs.pop("""candidate_labels""" ) __a = model_outputs["""logits"""][0] if self.framework == "pt": __a = logits.softmax(dim=0 ) __a = probs.tolist() else: raise ValueError("""`tf` framework not supported.""" ) __a = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda __lowercase : -x[0] ) ] return result
302
0
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): _a : str = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _a : str = 128_022 _a : Optional[Any] = 128_028 @require_sentencepiece class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : Optional[Any] = MaMaaaTokenizer _UpperCamelCase : int = False _UpperCamelCase : int = False _UpperCamelCase : int = True def __A ( self ): super().setUp() _lowerCAmelCase : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] _lowerCAmelCase : Dict = dict(zip(a__ , range(len(a__ ) ) ) ) _lowerCAmelCase : List[Any] = Path(self.tmpdirname ) save_json(a__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(a__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] ) _lowerCAmelCase : Any = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self , **a__ ): return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **a__ ) def __A ( self , a__ ): return ( "This is a test", "This is a test", ) def __A ( self ): _lowerCAmelCase : str = """</s>""" _lowerCAmelCase : Optional[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def __A ( self ): _lowerCAmelCase : Any = self.get_tokenizer() _lowerCAmelCase : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<s>""" ) self.assertEqual(len(a__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("""Skip this test while all models are still to be uploaded.""" ) def __A ( self ): pass def __A ( self ): _lowerCAmelCase : Optional[int] = self.get_tokenizer() _lowerCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [2, 3, 4, 5, 6] , ) _lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) _lowerCAmelCase : int = tokenizer.convert_tokens_to_string(a__ ) self.assertEqual(a__ , """This is a test""" ) @slow def __A ( self ): # fmt: off _lowerCAmelCase : Optional[int] = {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , ) @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): _UpperCamelCase : Union[str, Any] = "facebook/m2m100_418M" _UpperCamelCase : Optional[Any] = [ "In my opinion, there are two levels of response from the French government.", "NSA Affair Emphasizes Complete Lack of Debate on Intelligence", ] _UpperCamelCase : Optional[int] = [ "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "L'affaire NSA souligne l'absence totale de débat sur le renseignement", ] # fmt: off _UpperCamelCase : List[str] = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2] @classmethod def __A ( cls ): _lowerCAmelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" ) _lowerCAmelCase : Tuple = 1 return cls def __A ( self ): self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 128063 ) def __A ( self ): _lowerCAmelCase : Tuple = self.tokenizer.get_vocab() self.assertEqual(len(a__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["""<unk>"""] , 3 ) self.assertIn(self.tokenizer.get_lang_token("""en""" ) , a__ ) def __A ( self ): _lowerCAmelCase : List[str] = """en""" _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a__ ) def __A ( self ): self.assertIn(a__ , self.tokenizer.all_special_ids ) # fmt: off _lowerCAmelCase : Union[str, Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on _lowerCAmelCase : str = self.tokenizer.decode(a__ , skip_special_tokens=a__ ) _lowerCAmelCase : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a__ ) self.assertEqual(a__ , a__ ) self.assertNotIn(self.tokenizer.eos_token , a__ ) def __A ( self ): _lowerCAmelCase : Dict = tempfile.mkdtemp() _lowerCAmelCase : Tuple = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(a__ ) _lowerCAmelCase : Tuple = MaMaaaTokenizer.from_pretrained(a__ ) self.assertDictEqual(new_tok.lang_token_to_id , a__ ) @require_torch def __A ( self ): _lowerCAmelCase : str = """en""" _lowerCAmelCase : int = """fr""" _lowerCAmelCase : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a__ , return_tensors="""pt""" ) _lowerCAmelCase : int = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _lowerCAmelCase : Any = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __A ( self ): _lowerCAmelCase : Union[str, Any] = """mr""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _lowerCAmelCase : List[Any] = """zh""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __A ( self ): _lowerCAmelCase : Union[str, Any] = """mr""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _lowerCAmelCase : Union[str, Any] = """zh""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __A ( self ): _lowerCAmelCase : Tuple = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" ) self.assertEqual( nested_simplify(a__ ) , { # en_XX, A, test, EOS """input_ids""": [[128022, 58, 4183, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 128006, } , )
44
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Dict =['pixel_values'] def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ): '''simple docstring''' super().__init__(**__lowercase ) __a = size if size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase ) __a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" ) __a = do_resize __a = do_rescale __a = do_normalize __a = do_center_crop __a = crop_size __a = size __a = resample __a = rescale_factor __a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "shortest_edge" in size: __a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __a = (size["""height"""], size["""width"""]) else: raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" ) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" ) return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ): '''simple docstring''' return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ): '''simple docstring''' return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ): '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = do_rescale if do_rescale is not None else self.do_rescale __a = do_normalize if do_normalize is not None else self.do_normalize __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase ) __a = resample if resample is not None else self.resample __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = size if size is not None else self.size __a = get_size_dict(__lowercase ) if not is_batched(__lowercase ): __a = [images] if not valid_images(__lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __a = [to_numpy_array(__lowercase ) for image in images] if do_resize: __a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] if do_center_crop: __a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images] if do_rescale: __a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: __a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] __a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __a = {"""pixel_values""": images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
302
0
"""simple docstring""" import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' __UpperCAmelCase : Optional[datasets.Features] = None __UpperCAmelCase : str = "utf-8" __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True # deprecated __UpperCAmelCase : Optional[int] = None # deprecated __UpperCAmelCase : int = 1_0 << 2_0 # 10MB __UpperCAmelCase : Optional[bool] = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = JsonConfig def __UpperCAmelCase ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) __a = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __UpperCAmelCase ( self , _a ): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_a , (str, list, tuple) ): __a = data_files if isinstance(_a , _a ): __a = [files] __a = [dl_manager.iter_files(_a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __a = [] for split_name, files in data_files.items(): if isinstance(_a , _a ): __a = [files] __a = [dl_manager.iter_files(_a ) for file in files] splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) ) return splits def __UpperCAmelCase ( self , _a ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): __a = self.config.features.arrow_schema.field(_a ).type __a = pa_table.append_column(_a , pa.array([None] * len(_a ) , type=_a ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example __a = table_cast(_a , self.config.features.arrow_schema ) return pa_table def __UpperCAmelCase ( self , _a ): for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: __a = json.load(_a ) # We keep only the field we are interested in __a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_a , (list, tuple) ): __a = set().union(*[row.keys() for row in dataset] ) __a = {col: [row.get(_a ) for row in dataset] for col in keys} else: __a = dataset __a = pa.Table.from_pydict(_a ) yield file_idx, self._cast_table(_a ) # If the file has one json object per line else: with open(_a , '''rb''' ) as f: __a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small __a = max(self.config.chunksize // 32 , 16 << 10 ) __a = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: __a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_a ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": __a = batch.decode(self.config.encoding , errors=_a ).encode('''utf-8''' ) try: while True: try: __a = paj.read_json( io.BytesIO(_a ) , read_options=paj.ReadOptions(block_size=_a ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_a , pa.ArrowInvalid ) and "straddling" not in str(_a ) or block_size > len(_a ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'''Batch of {len(_a )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: __a = json.load(_a ) except json.JSONDecodeError: logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_a , _a ): # list is the only sequence type supported in JSON try: __a = set().union(*[row.keys() for row in dataset] ) __a = {col: [row.get(_a ) for row in dataset] for col in keys} __a = pa.Table.from_pydict(_a ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None yield file_idx, self._cast_table(_a ) break else: logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise ValueError( f'''Not able to read records in the JSON file at {file}. ''' f'''You should probably indicate the field of the JSON file containing your records. ''' f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ''' f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_a ) batch_idx += 1
45
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoTokenizer.from_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = tokenizer("""This is me""" , return_tensors="""pt""" ) __a = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __a = model.generate(**__lowercase ) __a = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __a = model_reloaded.generate(**__lowercase ) self.assertTrue(torch.allclose(__lowercase , __lowercase ) ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowercase ): model.save_pretrained(__lowercase ) __a = model.reverse_bettertransformer() model.save_pretrained(__lowercase )
302
0
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'summarization' _SCREAMING_SNAKE_CASE = ['loss'] _SCREAMING_SNAKE_CASE = ROUGE_KEYS _SCREAMING_SNAKE_CASE = 'rouge2' def __init__( self , lowercase , **lowercase ) -> str: if hparams.sortish_sampler and hparams.gpus > 1: lowerCAmelCase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) lowerCAmelCase = Path(self.output_dir ) / """metrics.json""" lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) lowerCAmelCase = 0 lowerCAmelCase = defaultdict(lowercase ) lowerCAmelCase = self.config.model_type lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size lowerCAmelCase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } lowerCAmelCase = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} lowerCAmelCase = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) lowerCAmelCase = get_git_info()["""repo_sha"""] lowerCAmelCase = hparams.num_workers lowerCAmelCase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] lowerCAmelCase = self.decoder_start_token_id lowerCAmelCase = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) lowerCAmelCase = False lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: lowerCAmelCase = self.hparams.eval_max_gen_length else: lowerCAmelCase = self.model.config.max_length lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def _snake_case ( self , lowercase ) -> Dict[str, List[str]]: lowerCAmelCase = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) lowerCAmelCase = True return readable_batch def _snake_case ( self , lowercase , **lowercase ) -> Union[str, Any]: return self.model(lowercase , **lowercase ) def _snake_case ( self , lowercase ) -> Union[str, Any]: lowerCAmelCase = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def _snake_case ( self , lowercase ) -> Tuple: lowerCAmelCase = self.tokenizer.pad_token_id lowerCAmelCase , lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""] lowerCAmelCase = batch["""labels"""] if isinstance(self.model , lowercase ): lowerCAmelCase = self.model._shift_right(lowercase ) else: lowerCAmelCase = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero lowerCAmelCase = decoder_input_ids self.save_readable_batch(lowercase ) lowerCAmelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) lowerCAmelCase = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: lowerCAmelCase = nn.functional.log_softmax(lowercase , dim=-1 ) lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def _snake_case ( self ) -> int: return self.tokenizer.pad_token_id def _snake_case ( self , lowercase , lowercase ) -> Dict: lowerCAmelCase = self._step(lowercase ) lowerCAmelCase = dict(zip(self.loss_names , lowercase ) ) # tokens per batch lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() lowerCAmelCase = batch["""input_ids"""].shape[0] lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum() lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def _snake_case ( self , lowercase , lowercase ) -> Dict: return self._generative_step(lowercase ) def _snake_case ( self , lowercase , lowercase="val" ) -> Dict: self.step_count += 1 lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} lowerCAmelCase = losses["""loss"""] lowerCAmelCase = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } lowerCAmelCase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) lowerCAmelCase = torch.tensor(lowercase ).type_as(lowercase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()} lowerCAmelCase = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor, } def _snake_case ( self , lowercase , lowercase ) -> Dict: return calculate_rouge(lowercase , lowercase ) def _snake_case ( self , lowercase ) -> dict: lowerCAmelCase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') lowerCAmelCase = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0] lowerCAmelCase = self.ids_to_clean_text(lowercase ) lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] ) lowerCAmelCase = self._step(lowercase ) lowerCAmelCase = dict(zip(self.loss_names , lowercase ) ) lowerCAmelCase = self.calc_generative_metrics(lowercase , lowercase ) lowerCAmelCase = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def _snake_case ( self , lowercase , lowercase ) -> Dict: return self._generative_step(lowercase ) def _snake_case ( self , lowercase ) -> int: return self.validation_epoch_end(lowercase , prefix="""test""" ) def _snake_case ( self , lowercase ) -> SeqaSeqDataset: lowerCAmelCase = self.n_obs[type_path] lowerCAmelCase = self.target_lens[type_path] lowerCAmelCase = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader: lowerCAmelCase = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": lowerCAmelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": lowerCAmelCase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def _snake_case ( self ) -> DataLoader: lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def _snake_case ( self ) -> DataLoader: return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def _snake_case ( self ) -> DataLoader: return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def _snake_case ( lowercase , lowercase ) -> Optional[int]: BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( """--max_source_length""" , default=1_024 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=56 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=142 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=142 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase ) parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase ) parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=lowercase , default=500 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase ) parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase ) parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( """--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So""" """ val_check_interval will effect it.""" ) , ) return parser class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'translation' _SCREAMING_SNAKE_CASE = ['loss'] _SCREAMING_SNAKE_CASE = ['bleu'] _SCREAMING_SNAKE_CASE = 'bleu' def __init__( self , lowercase , **lowercase ) -> Union[str, Any]: super().__init__(lowercase , **lowercase ) lowerCAmelCase = hparams.src_lang lowerCAmelCase = hparams.tgt_lang def _snake_case ( self , lowercase , lowercase ) -> dict: return calculate_bleu(lowercase , lowercase ) def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) check_output_dir(SCREAMING_SNAKE_CASE , expected_items=3 ) if model is None: if "summarization" in args.task: lowerCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE ) lowerCAmelCase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): lowerCAmelCase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger lowerCAmelCase = os.environ.get("""WANDB_PROJECT""" , SCREAMING_SNAKE_CASE ) lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' ) if args.early_stopping_patience >= 0: lowerCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: lowerCAmelCase = False lowerCAmelCase = args.val_metric == """loss""" lowerCAmelCase = generic_train( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE ) , early_stopping_callback=SCREAMING_SNAKE_CASE , logger=SCREAMING_SNAKE_CASE , ) pickle_save(model.hparams , model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model lowerCAmelCase = """""" lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=SCREAMING_SNAKE_CASE ) ) if checkpoints: lowerCAmelCase = checkpoints[-1] lowerCAmelCase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser) SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
46
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] ='albert' def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = embedding_size __a = hidden_size __a = num_hidden_layers __a = num_hidden_groups __a = num_attention_heads __a = inner_group_num __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout_prob __a = position_embedding_type class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
302
0
'''simple docstring''' def _lowerCAmelCase ( _UpperCamelCase : int ) -> int: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError('Input must be an integer' ) if input_num <= 0: raise ValueError('Input must be positive' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
47
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } SCREAMING_SNAKE_CASE__ : Any = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } SCREAMING_SNAKE_CASE__ : str = { 'facebook/blenderbot_small-90M': 512, } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Optional[Any] = BlenderbotSmallTokenizer def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]: super().__init__( ByteLevelBPETokenizer( vocab=UpperCamelCase__ , merges=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , ) , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : List[str] = add_prefix_space def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Any: lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : Tuple = [self.sep_token_id] lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
48
class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = val __a = None __a = None def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: __a = Node(__lowercase ) else: self.left.insert(__lowercase ) elif val > self.val: if self.right is None: __a = Node(__lowercase ) else: self.right.insert(__lowercase ) else: __a = val def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if root: inorder(root.left , _SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , _SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return arr __a = Node(arr[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. __a = [] inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
302
0
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py __snake_case :List[str] = '''src/transformers''' __snake_case :Tuple = '''docs/source/en''' __snake_case :Dict = '''.''' def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.readlines() # Find the start prompt. __a = 0 while not lines[start_index].startswith(_UpperCAmelCase ): start_index += 1 start_index += 1 __a = start_index while not lines[end_index].startswith(_UpperCAmelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | __snake_case :Union[str, Any] = '''Model|Encoder|Decoder|ForConditionalGeneration''' # Regexes that match TF/Flax/PT model names. __snake_case :Union[str, Any] = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') __snake_case :int = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __snake_case :Tuple = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # This is to make sure the transformers module imported is the one in the repo. __snake_case :str = direct_transformers_import(TRANSFORMERS_PATH) def __snake_case ( _UpperCAmelCase ): __a = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _UpperCAmelCase ) return [m.group(0 ) for m in matches] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = 2 if text == '''✅''' or text == '''❌''' else len(_UpperCAmelCase ) __a = (width - text_length) // 2 __a = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def __snake_case ( ): __a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __a = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __a = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __a = collections.defaultdict(_UpperCAmelCase ) __a = collections.defaultdict(_UpperCAmelCase ) __a = collections.defaultdict(_UpperCAmelCase ) __a = collections.defaultdict(_UpperCAmelCase ) __a = collections.defaultdict(_UpperCAmelCase ) # Let's lookup through all transformers object (once). for attr_name in dir(_UpperCAmelCase ): __a = None if attr_name.endswith('''Tokenizer''' ): __a = slow_tokenizers __a = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): __a = fast_tokenizers __a = attr_name[:-13] elif _re_tf_models.match(_UpperCAmelCase ) is not None: __a = tf_models __a = _re_tf_models.match(_UpperCAmelCase ).groups()[0] elif _re_flax_models.match(_UpperCAmelCase ) is not None: __a = flax_models __a = _re_flax_models.match(_UpperCAmelCase ).groups()[0] elif _re_pt_models.match(_UpperCAmelCase ) is not None: __a = pt_models __a = _re_pt_models.match(_UpperCAmelCase ).groups()[0] if lookup_dict is not None: while len(_UpperCAmelCase ) > 0: if attr_name in model_name_to_prefix.values(): __a = True break # Try again after removing the last word in the name __a = ''''''.join(camel_case_split(_UpperCAmelCase )[:-1] ) # Let's build that table! __a = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __a = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). __a = [len(_UpperCAmelCase ) + 2 for c in columns] __a = max([len(_UpperCAmelCase ) for name in model_names] ) + 2 # Build the table per se __a = '''|''' + '''|'''.join([_center_text(_UpperCAmelCase , _UpperCAmelCase ) for c, w in zip(_UpperCAmelCase , _UpperCAmelCase )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" __a = {True: '''✅''', False: '''❌'''} for name in model_names: __a = model_name_to_prefix[name] __a = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(_UpperCAmelCase , _UpperCAmelCase ) for l, w in zip(_UpperCAmelCase , _UpperCAmelCase )] ) + "|\n" return table def __snake_case ( _UpperCAmelCase=False ): __a , __a , __a , __a = _find_text_in_file( filename=os.path.join(_UpperCAmelCase , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) __a = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(_UpperCAmelCase , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": __snake_case :str = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __snake_case :List[str] = parser.parse_args() check_model_table(args.fix_and_overwrite)
49
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) ) class SCREAMING_SNAKE_CASE : def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = make_divisible(512 * width_multiplier , divisor=8 ) __a = hidden_act __a = conv_kernel_size __a = output_stride __a = classifier_dropout_prob __a = use_labels __a = is_training __a = num_labels __a = initializer_range __a = scope __a = width_multiplier __a = ffn_dropout __a = attn_dropout def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ): '''simple docstring''' __a = MobileViTVaModel(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[Any] =( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __lowerCamelCase : Any =( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCamelCase : Dict =False __lowerCamelCase : Optional[Any] =False __lowerCamelCase : int =False __lowerCamelCase : Any =False def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = MobileViTVaModelTester(self ) __a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def UpperCamelCase_ ( self : int ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__lowercase ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ): __a = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __a = outputs.hidden_states __a = 5 self.assertEqual(len(__lowercase ) , __lowercase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __a = 2 for i in range(len(__lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = MobileViTVaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase__ ( ): """simple docstring""" __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( __lowercase ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowercase ) __a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits # verify the logits __a = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) __a = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits.detach().cpu() __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) __a = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase ) __a = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
302
0
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = BertJapaneseTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def A_ ( self : Optional[int] ) -> Dict: super().setUp() lowerCamelCase__ : str = [ '[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは', '世界', '##世界', '、', '##、', '。', '##。', ] lowerCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : Tuple , UpperCAmelCase : Dict ) -> Optional[int]: lowerCamelCase__ : Tuple = 'こんにちは、世界。 \nこんばんは、世界。' lowerCamelCase__ : Union[str, Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。' return input_text, output_text def A_ ( self : List[str] , UpperCAmelCase : List[Any] ) -> str: lowerCamelCase__ , lowerCamelCase__ : str = self.get_input_output_texts(UpperCAmelCase ) lowerCamelCase__ : int = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) return text, ids def A_ ( self : Dict ) -> List[str]: pass # TODO add if relevant def A_ ( self : List[Any] ) -> Dict: pass # TODO add if relevant def A_ ( self : Any ) -> Dict: pass # TODO add if relevant def A_ ( self : Any ) -> Optional[int]: lowerCamelCase__ : Optional[Any] = self.tokenizer_class(self.vocab_file ) lowerCamelCase__ : str = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def A_ ( self : Any ) -> Tuple: lowerCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' ) self.assertIsNotNone(UpperCAmelCase ) lowerCamelCase__ : Any = 'こんにちは、世界。\nこんばんは、世界。' lowerCamelCase__ : Union[str, Any] = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase__ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(UpperCAmelCase , 'wb' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as handle: lowerCamelCase__ : Tuple = pickle.load(UpperCAmelCase ) lowerCamelCase__ : List[str] = tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = MecabTokenizer(mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : Union[str, Any] ) -> Any: try: lowerCamelCase__ : Optional[int] = MecabTokenizer(mecab_dic='unidic_lite' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : int ) -> Union[str, Any]: try: lowerCamelCase__ : Optional[Any] = MecabTokenizer(mecab_dic='unidic' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : str ) -> Optional[int]: lowerCamelCase__ : Union[str, Any] = MecabTokenizer(do_lower_case=UpperCAmelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : int ) -> List[str]: try: lowerCamelCase__ : Optional[int] = MecabTokenizer( do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , ) def A_ ( self : Dict ) -> Tuple: lowerCamelCase__ : Any = MecabTokenizer(normalize_text=UpperCAmelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , ) @require_sudachi def A_ ( self : List[Any] ) -> Optional[Any]: lowerCamelCase__ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' ) self.assertIsNotNone(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。' lowerCamelCase__ : List[Any] = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(UpperCAmelCase , 'wb' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as handle: lowerCamelCase__ : Optional[int] = pickle.load(UpperCAmelCase ) lowerCamelCase__ : Optional[int] = tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_sudachi def A_ ( self : Dict ) -> int: lowerCamelCase__ : Any = SudachiTokenizer(sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def A_ ( self : Dict ) -> Optional[Any]: lowerCamelCase__ : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] ) @require_sudachi def A_ ( self : Any ) -> int: lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] ) @require_sudachi def A_ ( self : Any ) -> Union[str, Any]: lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] ) @require_sudachi def A_ ( self : str ) -> Optional[int]: lowerCamelCase__ : List[str] = SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def A_ ( self : Union[str, Any] ) -> Tuple: lowerCamelCase__ : int = SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , ) @require_sudachi def A_ ( self : List[Any] ) -> Tuple: lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) @require_jumanpp def A_ ( self : str ) -> List[str]: lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' ) self.assertIsNotNone(UpperCAmelCase ) lowerCamelCase__ : Any = 'こんにちは、世界。\nこんばんは、世界。' lowerCamelCase__ : Any = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase__ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(UpperCAmelCase , 'wb' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as handle: lowerCamelCase__ : Optional[int] = pickle.load(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_jumanpp def A_ ( self : Tuple ) -> Tuple: lowerCamelCase__ : List[str] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def A_ ( self : str ) -> Optional[Any]: lowerCamelCase__ : List[str] = JumanppTokenizer(do_lower_case=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def A_ ( self : Tuple ) -> List[str]: lowerCamelCase__ : int = JumanppTokenizer(normalize_text=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def A_ ( self : str ) -> Dict: lowerCamelCase__ : Optional[Any] = JumanppTokenizer(trim_whitespace=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , ) @require_jumanpp def A_ ( self : Any ) -> Any: lowerCamelCase__ : Union[str, Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , ) def A_ ( self : List[Any] ) -> int: lowerCamelCase__ : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは'] lowerCamelCase__ : int = {} for i, token in enumerate(UpperCAmelCase ): lowerCamelCase__ : List[Any] = i lowerCamelCase__ : int = WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] ) def A_ ( self : Tuple ) -> str: lowerCamelCase__ : int = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' ) lowerCamelCase__ : List[str] = tokenizer.subword_tokenizer lowerCamelCase__ : Any = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' ) self.assertListEqual(UpperCAmelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] ) lowerCamelCase__ : Optional[int] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' ) self.assertListEqual(UpperCAmelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] ) def A_ ( self : Dict ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' ) lowerCamelCase__ : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = BertJapaneseTokenizer UpperCAmelCase__ = False def A_ ( self : Dict ) -> Any: super().setUp() lowerCamelCase__ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] lowerCamelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : List[Any] , **UpperCAmelCase : str ) -> List[str]: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **UpperCAmelCase ) def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] ) -> List[Any]: lowerCamelCase__ : str = 'こんにちは、世界。 \nこんばんは、世界。' lowerCamelCase__ : List[str] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。' return input_text, output_text def A_ ( self : Optional[Any] ) -> List[Any]: pass # TODO add if relevant def A_ ( self : Tuple ) -> Union[str, Any]: pass # TODO add if relevant def A_ ( self : Optional[Any] ) -> Optional[int]: pass # TODO add if relevant def A_ ( self : Tuple ) -> Tuple: lowerCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' ) lowerCamelCase__ : List[str] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' ) self.assertListEqual( UpperCAmelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def A_ ( self : Dict ) -> Any: lowerCamelCase__ : Any = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] lowerCamelCase__ : Optional[int] = {} for i, token in enumerate(UpperCAmelCase ): lowerCamelCase__ : Union[str, Any] = i lowerCamelCase__ : Optional[int] = CharacterTokenizer(vocab=UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] ) self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] ) def A_ ( self : Any ) -> str: lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' ) lowerCamelCase__ : List[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) lowerCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : List[str] ) -> Dict: lowerCamelCase__ : Union[str, Any] = 'cl-tohoku/bert-base-japanese' lowerCamelCase__ : int = AutoTokenizer.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : Tuple ) -> Optional[int]: lowerCamelCase__ : Union[str, Any] = 'cl-tohoku/bert-base-japanese' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) ) lowerCamelCase__ : Optional[Any] = 'bert-base-cased' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) )
50
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
302
0
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __snake_case ( unittest.TestCase ): @parameterized.expand([(None,), ('''foo.json''',)]) def lowerCamelCase ( self : Optional[int] , _snake_case : Dict): """simple docstring""" UpperCAmelCase_ = GenerationConfig( do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case , config_name=_snake_case) UpperCAmelCase_ = GenerationConfig.from_pretrained(_snake_case , config_name=_snake_case) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , _snake_case) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50) self.assertEqual(loaded_config.max_length , 20) self.assertEqual(loaded_config.max_time , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = AutoConfig.from_pretrained('''gpt2''') UpperCAmelCase_ = GenerationConfig.from_model_config(_snake_case) UpperCAmelCase_ = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(_snake_case , _snake_case) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = GenerationConfig() UpperCAmelCase_ = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } UpperCAmelCase_ = copy.deepcopy(_snake_case) UpperCAmelCase_ = generation_config.update(**_snake_case) # update_kwargs was not modified (no side effects) self.assertEqual(_snake_case , _snake_case) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(_snake_case , {'''foo''': '''bar'''}) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = GenerationConfig() UpperCAmelCase_ = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir: generation_config.save_pretrained(_snake_case) UpperCAmelCase_ = GenerationConfig.from_pretrained(_snake_case) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''') UpperCAmelCase_ = GenerationConfig.from_model_config(_snake_case) assert not hasattr(_snake_case , '''foo''') # no new kwargs should be initialized if from config def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , _snake_case) self.assertEqual(default_config.num_beams , 1) UpperCAmelCase_ = GenerationConfig( do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , _snake_case) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case) UpperCAmelCase_ = GenerationConfig.from_pretrained(_snake_case , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , _snake_case) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Tuple): """simple docstring""" UpperCAmelCase_ = TOKEN HfFolder.save_token(_snake_case) @classmethod def lowerCamelCase ( cls : Tuple): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-generation-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''') except HTTPError: pass def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = GenerationConfig( do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token) UpperCAmelCase_ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''test-generation-config''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = GenerationConfig( do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token) UpperCAmelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
51
import string import numpy def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : __lowerCamelCase : List[str] =string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase : List[Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 ) __lowerCamelCase : Optional[Any] =numpy.vectorize(lowerCamelCase__ ) def __init__( self : Union[str, Any] , __lowercase : numpy.ndarray ): '''simple docstring''' __a = self.modulus(__lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __a = encrypt_key.shape[0] def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' return self.key_string.index(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : int ): '''simple docstring''' return self.key_string[round(__lowercase )] def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = len(self.key_string ) if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1: __a = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' __a = [char for char in text.upper() if char in self.key_string] __a = chars[-1] while len(__lowercase ) % self.break_key != 0: chars.append(__lowercase ) return "".join(__lowercase ) def UpperCamelCase_ ( self : List[str] , __lowercase : str ): '''simple docstring''' __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[ 0 ] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __a = i break __a = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowercase ) ) def UpperCamelCase_ ( self : Any , __lowercase : str ): '''simple docstring''' __a = self.make_decrypt_key() __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase__ ( ): """simple docstring""" __a = int(input("""Enter the order of the encryption key: """ ) ) __a = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(_SCREAMING_SNAKE_CASE ): __a = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) __a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) __a = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": __a = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": __a = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 255 , A_=True , ): '''simple docstring''' UpperCamelCase : Dict = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} UpperCamelCase : List[str] = parent UpperCamelCase : Dict = batch_size UpperCamelCase : str = num_channels UpperCamelCase : Optional[Any] = min_resolution UpperCamelCase : Dict = max_resolution UpperCamelCase : int = do_resize UpperCamelCase : Any = size UpperCamelCase : Tuple = do_normalize UpperCamelCase : Optional[int] = image_mean UpperCamelCase : Union[str, Any] = image_std UpperCamelCase : Optional[int] = do_rescale UpperCamelCase : str = rescale_factor UpperCamelCase : int = do_pad def __UpperCamelCase( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCamelCase( self , A_ , A_=False ): '''simple docstring''' if not batched: UpperCamelCase : Optional[int] = image_inputs[0] if isinstance(A_ , Image.Image ): UpperCamelCase , UpperCamelCase : List[str] = image.size else: UpperCamelCase , UpperCamelCase : Tuple = image.shape[1], image.shape[2] if w < h: UpperCamelCase : Dict = int(self.size["shortest_edge"] * h / w ) UpperCamelCase : Dict = self.size["shortest_edge"] elif w > h: UpperCamelCase : Optional[int] = self.size["shortest_edge"] UpperCamelCase : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: UpperCamelCase : List[str] = self.size["shortest_edge"] UpperCamelCase : Optional[int] = self.size["shortest_edge"] else: UpperCamelCase : List[Any] = [] for image in image_inputs: UpperCamelCase , UpperCamelCase : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase : List[Any] = max(A_ , key=lambda A_ : item[0] )[0] UpperCamelCase : int = max(A_ , key=lambda A_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :int = DeformableDetrImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = DeformableDetrImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_rescale" ) ) self.assertTrue(hasattr(A_ , "do_pad" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , A_ ) UpperCamelCase : str = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , A_ ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase , UpperCamelCase : int = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : int = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(A_ , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase : Union[str, Any] = image_processing(A_ , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Dict = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: UpperCamelCase : str = json.loads(f.read() ) UpperCamelCase : Dict = {"image_id": 3_9769, "annotations": target} # encode them UpperCamelCase : str = DeformableDetrImageProcessor() UpperCamelCase : Optional[Any] = image_processing(images=A_ , annotations=A_ , return_tensors="pt" ) # verify pixel values UpperCamelCase : Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , A_ ) UpperCamelCase : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase : Union[str, Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) ) # verify boxes UpperCamelCase : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ ) UpperCamelCase : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase : Optional[Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) ) # verify is_crowd UpperCamelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) ) # verify class_labels UpperCamelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) ) # verify orig_size UpperCamelCase : Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) ) # verify size UpperCamelCase : Any = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: UpperCamelCase : Optional[int] = json.loads(f.read() ) UpperCamelCase : Any = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target} UpperCamelCase : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them UpperCamelCase : List[Any] = DeformableDetrImageProcessor(format="coco_panoptic" ) UpperCamelCase : Dict = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="pt" ) # verify pixel values UpperCamelCase : Any = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , A_ ) UpperCamelCase : List[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) ) # verify boxes UpperCamelCase : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ ) UpperCamelCase : Any = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) ) # verify is_crowd UpperCamelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) ) # verify class_labels UpperCamelCase : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) ) # verify masks UpperCamelCase : Dict = 82_2873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A_ ) # verify orig_size UpperCamelCase : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) ) # verify size UpperCamelCase : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
52
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] ='autoformer' __lowerCamelCase : str ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ): '''simple docstring''' # time series specific configuration __a = prediction_length __a = context_length if context_length is not None else prediction_length __a = distribution_output __a = loss __a = input_size __a = num_time_features __a = lags_sequence __a = scaling __a = num_dynamic_real_features __a = num_static_real_features __a = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) __a = cardinality else: __a = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) __a = embedding_dimension else: __a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __a = num_parallel_samples # Transformer architecture configuration __a = input_size * len(self.lags_sequence ) + self._number_of_features __a = d_model __a = encoder_attention_heads __a = decoder_attention_heads __a = encoder_ffn_dim __a = decoder_ffn_dim __a = encoder_layers __a = decoder_layers __a = dropout __a = attention_dropout __a = activation_dropout __a = encoder_layerdrop __a = decoder_layerdrop __a = activation_function __a = init_std __a = use_cache # Autoformer __a = label_length __a = moving_average __a = autocorrelation_factor super().__init__(is_encoder_decoder=__lowercase , **__lowercase ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
302
0
'''simple docstring''' import os def lowercase__ ( __lowercase : str = "matrix.txt" ) -> int: """simple docstring""" with open(os.path.join(os.path.dirname(__lowercase ) , __lowercase ) ) as in_file: __UpperCamelCase = in_file.read() __UpperCamelCase = [[int(__lowercase ) for cell in row.split(',' )] for row in data.strip().splitlines()] __UpperCamelCase = [[0 for cell in row] for row in grid] __UpperCamelCase = len(grid[0] ) __UpperCamelCase = [[0 for i in range(__lowercase )] for j in range(__lowercase )] __UpperCamelCase = grid[0][0] for i in range(1 , __lowercase ): __UpperCamelCase = grid[0][i] + dp[0][i - 1] for i in range(1 , __lowercase ): __UpperCamelCase = grid[i][0] + dp[i - 1][0] for i in range(1 , __lowercase ): for j in range(1 , __lowercase ): __UpperCamelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(f'{solution() = }')
53
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : int = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "gpt_bigcode" snake_case__ : str = ["past_key_values"] snake_case__ : Optional[Any] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[Any] , UpperCAmelCase__ : Any=5_0_2_5_7 , UpperCAmelCase__ : Optional[int]=1_0_2_4 , UpperCAmelCase__ : int=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Any=1_2 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : str="gelu_pytorch_tanh" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[str]=1E-5 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : str=5_0_2_5_6 , UpperCAmelCase__ : Union[str, Any]=5_0_2_5_6 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : List[str] , ) -> Dict: __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = n_positions __SCREAMING_SNAKE_CASE = n_embd __SCREAMING_SNAKE_CASE = n_layer __SCREAMING_SNAKE_CASE = n_head __SCREAMING_SNAKE_CASE = n_inner __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = resid_pdrop __SCREAMING_SNAKE_CASE = embd_pdrop __SCREAMING_SNAKE_CASE = attn_pdrop __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = scale_attn_weights __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = attention_softmax_in_fpaa __SCREAMING_SNAKE_CASE = scale_attention_softmax_in_fpaa __SCREAMING_SNAKE_CASE = multi_query __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
54
from __future__ import annotations lowerCamelCase__ = """#""" class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] ): '''simple docstring''' __a = {} def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ): '''simple docstring''' __a = self._trie for char in text: if char not in trie: __a = {} __a = trie[char] __a = True def UpperCamelCase_ ( self : Tuple , __lowercase : str ): '''simple docstring''' __a = self._trie for char in prefix: if char in trie: __a = trie[char] else: return [] return self._elements(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ): '''simple docstring''' __a = [] for c, v in d.items(): __a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )] result.extend(__lowercase ) return tuple(__lowercase ) lowerCamelCase__ = Trie() lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" __a = trie.find_word(_SCREAMING_SNAKE_CASE ) return tuple(string + word for word in suffixes ) def lowerCAmelCase__ ( ): """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : int = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = "autoformer" _lowerCamelCase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "student_t" , UpperCamelCase = "nll" , UpperCamelCase = 1 , UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase = True , UpperCamelCase = 0 , UpperCamelCase = 0 , UpperCamelCase = 0 , UpperCamelCase = 0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 64 , UpperCamelCase = 2 , UpperCamelCase = 2 , UpperCamelCase = 2 , UpperCamelCase = 2 , UpperCamelCase = 32 , UpperCamelCase = 32 , UpperCamelCase = "gelu" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 100 , UpperCamelCase = 0.02 , UpperCamelCase = True , UpperCamelCase=True , UpperCamelCase = 10 , UpperCamelCase = 25 , UpperCamelCase = 3 , **UpperCamelCase , ): """simple docstring""" # time series specific configuration lowerCamelCase_ = prediction_length lowerCamelCase_ = context_length if context_length is not None else prediction_length lowerCamelCase_ = distribution_output lowerCamelCase_ = loss lowerCamelCase_ = input_size lowerCamelCase_ = num_time_features lowerCamelCase_ = lags_sequence lowerCamelCase_ = scaling lowerCamelCase_ = num_dynamic_real_features lowerCamelCase_ = num_static_real_features lowerCamelCase_ = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) lowerCamelCase_ = cardinality else: lowerCamelCase_ = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) lowerCamelCase_ = embedding_dimension else: lowerCamelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase_ = num_parallel_samples # Transformer architecture configuration lowerCamelCase_ = input_size * len(self.lags_sequence ) + self._number_of_features lowerCamelCase_ = d_model lowerCamelCase_ = encoder_attention_heads lowerCamelCase_ = decoder_attention_heads lowerCamelCase_ = encoder_ffn_dim lowerCamelCase_ = decoder_ffn_dim lowerCamelCase_ = encoder_layers lowerCamelCase_ = decoder_layers lowerCamelCase_ = dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = activation_dropout lowerCamelCase_ = encoder_layerdrop lowerCamelCase_ = decoder_layerdrop lowerCamelCase_ = activation_function lowerCamelCase_ = init_std lowerCamelCase_ = use_cache # Autoformer lowerCamelCase_ = label_length lowerCamelCase_ = moving_average lowerCamelCase_ = autocorrelation_factor super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def snake_case ( self ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
55
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = num_attention_heads __a = attention_head_dim __a = num_attention_heads * attention_head_dim __a = additional_embeddings __a = time_embed_dim or inner_dim __a = embedding_proj_dim or embedding_dim __a = clip_embed_dim or embedding_dim __a = Timesteps(__lowercase , __lowercase , 0 ) __a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) if embedding_proj_norm_type is None: __a = None elif embedding_proj_norm_type == "layer": __a = nn.LayerNorm(__lowercase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) __a = nn.Linear(__lowercase , __lowercase ) if encoder_hid_proj_type is None: __a = None elif encoder_hid_proj_type == "linear": __a = nn.Linear(__lowercase , __lowercase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) __a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) ) if added_emb_type == "prd": __a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) ) elif added_emb_type is None: __a = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) __a = nn.ModuleList( [ BasicTransformerBlock( __lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , ) for d in range(__lowercase ) ] ) if norm_in_type == "layer": __a = nn.LayerNorm(__lowercase ) elif norm_in_type is None: __a = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) __a = nn.LayerNorm(__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) __a = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) __a = causal_attention_mask[None, ...] self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = {} def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ): if hasattr(__lowercase , """set_processor""" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowercase , __lowercase , __lowercase ) return processors def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __a = len(self.attn_processors.keys() ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ): if hasattr(__lowercase , """set_processor""" ): if not isinstance(__lowercase , __lowercase ): module.set_processor(__lowercase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ): '''simple docstring''' __a = hidden_states.shape[0] __a = timestep if not torch.is_tensor(__lowercase ): __a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0: __a = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device ) __a = self.time_proj(__lowercase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. __a = timesteps_projected.to(dtype=self.dtype ) __a = self.time_embedding(__lowercase ) if self.embedding_proj_norm is not None: __a = self.embedding_proj_norm(__lowercase ) __a = self.embedding_proj(__lowercase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __a = self.encoder_hidden_states_proj(__lowercase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) __a = self.proj_in(__lowercase ) __a = self.positional_embedding.to(hidden_states.dtype ) __a = [] __a = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowercase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __a = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __a = hidden_states[:, None, :] __a = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 ) additional_embeds.append(__lowercase ) __a = torch.cat( __lowercase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __a = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __a = F.pad( __lowercase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __a = hidden_states + positional_embeddings if attention_mask is not None: __a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 __a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 ) __a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __a = self.norm_in(__lowercase ) for block in self.transformer_blocks: __a = block(__lowercase , attention_mask=__lowercase ) __a = self.norm_out(__lowercase ) if self.prd_embedding is not None: __a = hidden_states[:, -1] else: __a = hidden_states[:, additional_embeddings_len:] __a = self.proj_to_clip_embeddings(__lowercase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple ): '''simple docstring''' __a = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
302
0
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class a : snake_case_ = field( metadata={"help": "The output directory where the model will be written."} , ) snake_case_ = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } , ) snake_case_ = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } , ) snake_case_ = field( default=_lowerCamelCase , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) snake_case_ = field( default=_lowerCamelCase , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def __magic_name__ ( ) -> List[str]: '''simple docstring''' snake_case_ = HfArgumentParser((ModelArguments,) ) ((snake_case_) ,) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: snake_case_ = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: snake_case_ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: snake_case_ = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: snake_case_ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed snake_case_ = True snake_case_ = True snake_case_ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=__UpperCAmelCase, decoder_config=__UpperCAmelCase, ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens snake_case_ = decoder_config.decoder_start_token_id snake_case_ = decoder_config.pad_token_id if decoder_start_token_id is None: snake_case_ = decoder_config.bos_token_id if pad_token_id is None: snake_case_ = decoder_config.eos_token_id # This is necessary to make Flax's generate() work snake_case_ = decoder_config.eos_token_id snake_case_ = decoder_start_token_id snake_case_ = pad_token_id snake_case_ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) snake_case_ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) snake_case_ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
56
from functools import lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 __a = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_SCREAMING_SNAKE_CASE ) if n > 1: factors.add(_SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ): """simple docstring""" return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 while True: # Increment each value of a generated range __a = [base + i for i in range(_SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. __a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group] checker.append(_SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(_SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ): """simple docstring""" __a = run(_SCREAMING_SNAKE_CASE ) return results[0] if len(_SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
302
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg") A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def _lowerCamelCase ( ): '''simple docstring''' with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() __lowerCAmelCase = canny.canny(_UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def _lowerCamelCase ( ): '''simple docstring''' assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all() def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase ) assert res.any() def _lowerCamelCase ( ): '''simple docstring''' assert med.median_filter(_UpperCamelCase , 3 ).any() def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase ) assert grad.any() and theta.any() def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 ) assert sepia.all() def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. __lowerCAmelCase = imread(_UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = image[x_coordinate][y_coordinate] __lowerCAmelCase = lbp.get_neighbors_pixel( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) assert lbp_image.any()
57
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" __a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __a = 128 elif "12-12" in model_name: __a = 12 __a = 12 elif "14-14" in model_name: __a = 14 __a = 14 elif "16-16" in model_name: __a = 16 __a = 16 else: raise ValueError("""Model not supported""" ) __a = """huggingface/label-files""" if "speech-commands" in model_name: __a = 35 __a = """speech-commands-v2-id2label.json""" else: __a = 527 __a = """audioset-id2label.json""" __a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) __a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if "module.v" in name: __a = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: __a = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: __a = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: __a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __a = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: __a = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __a = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __a = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __a = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __a = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __a = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: __a = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): __a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: __a = key.split(""".""" ) __a = int(key_split[3] ) __a = config.hidden_size if "weight" in key: __a = val[:dim, :] __a = val[dim : dim * 2, :] __a = val[-dim:, :] else: __a = val[:dim] __a = val[dim : dim * 2] __a = val[-dim:] else: __a = val return orig_state_dict def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ): """simple docstring""" __a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE ) __a = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __a = model_name_to_url[model_name] __a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # remove some keys remove_keys(_SCREAMING_SNAKE_CASE ) # rename some keys __a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load 🤗 model __a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978 __a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526 __a = 1024 if """speech-commands""" not in model_name else 128 __a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) if "speech-commands" in model_name: __a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) __a = dataset[0]["""audio"""]["""array"""] else: __a = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) __a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE ) __a = waveform.squeeze().numpy() __a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" ) # forward pass __a = model(**_SCREAMING_SNAKE_CASE ) __a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __a = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __a = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __a = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __a = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __a = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __a = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __a = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __a = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f"MIT/{model_name}" ) feature_extractor.push_to_hub(f"MIT/{model_name}" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
302
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowercase_ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class a_ ( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: UpperCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: UpperCamelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def snake_case_( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) _SCREAMING_SNAKE_CASE = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(A ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , return_all_scores=A ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , return_all_scores=A ) self.assertEqual( nested_simplify(A ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) _SCREAMING_SNAKE_CASE = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=A ) self.assertEqual( nested_simplify(A ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) _SCREAMING_SNAKE_CASE = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=A ) self.assertEqual( nested_simplify(A ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def snake_case_( self ) -> int: import torch _SCREAMING_SNAKE_CASE = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def snake_case_( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def snake_case_( self ) -> Any: _SCREAMING_SNAKE_CASE = pipeline("""text-classification""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def snake_case_( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = pipeline("""text-classification""" , framework="""tf""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def snake_case_( self , A , A , A ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = TextClassificationPipeline(model=A , tokenizer=A ) return text_classifier, ["HuggingFace is in", "This is another test"] def snake_case_( self , A , A ) -> int: _SCREAMING_SNAKE_CASE = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 _SCREAMING_SNAKE_CASE = """HuggingFace is in""" _SCREAMING_SNAKE_CASE = text_classifier(A ) self.assertEqual(nested_simplify(A ) , [{"""label""": ANY(A ), """score""": ANY(A )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) _SCREAMING_SNAKE_CASE = ["""HuggingFace is in """, """Paris is in France"""] _SCREAMING_SNAKE_CASE = text_classifier(A ) self.assertEqual( nested_simplify(A ) , [{"""label""": ANY(A ), """score""": ANY(A )}, {"""label""": ANY(A ), """score""": ANY(A )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format _SCREAMING_SNAKE_CASE = text_classifier(A , top_k=A ) _SCREAMING_SNAKE_CASE = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(A ) , [[{"""label""": ANY(A ), """score""": ANY(A )}] * N, [{"""label""": ANY(A ), """score""": ANY(A )}] * N] , ) _SCREAMING_SNAKE_CASE = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} _SCREAMING_SNAKE_CASE = text_classifier(A ) self.assertEqual( nested_simplify(A ) , {"""label""": ANY(A ), """score""": ANY(A )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. _SCREAMING_SNAKE_CASE = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(A ): text_classifier(A ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility _SCREAMING_SNAKE_CASE = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(A ) , [{"""label""": ANY(A ), """score""": ANY(A )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
58
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } lowerCamelCase__ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } lowerCamelCase__ = """▁""" class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] =VOCAB_FILES_NAMES __lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any =AlbertTokenizer def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. __a = ( AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token ) super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , ) __a = do_lower_case __a = remove_space __a = keep_accents __a = vocab_file __a = False if not self.vocab_file else True def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __a = os.path.join( __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
302
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase ( A_ ): A__ : jnp.ndarray @flax_register_to_config class UpperCAmelCase ( nn.Module ,A_ ,A_ ): A__ : int = 32 A__ : int = 4 A__ : int = 4 A__ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) A__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") A__ : Union[bool, Tuple[bool]] = False A__ : Tuple[int] = (3_20, 6_40, 12_80, 12_80) A__ : int = 2 A__ : Union[int, Tuple[int]] = 8 A__ : Optional[Union[int, Tuple[int]]] = None A__ : int = 12_80 A__ : float = 0.0 A__ : bool = False A__ : jnp.dtype = jnp.floataa A__ : bool = True A__ : int = 0 A__ : bool = False def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' snake_case : Dict = (1, self.in_channels, self.sample_size, self.sample_size) snake_case : Any = jnp.zeros(snake_case__ , dtype=jnp.floataa ) snake_case : List[str] = jnp.ones((1,) , dtype=jnp.intaa ) snake_case : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case , snake_case : Optional[int] = jax.random.split(snake_case__ ) snake_case : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng} return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"] def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple: '''simple docstring''' snake_case : str = self.block_out_channels snake_case : Optional[Any] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case : Tuple = self.num_attention_heads or self.attention_head_dim # input snake_case : Tuple = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case : Union[str, Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case : Dict = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype ) snake_case : List[str] = self.only_cross_attention if isinstance(snake_case__ , snake_case__ ): snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(snake_case__ , snake_case__ ): snake_case : List[Any] = (num_attention_heads,) * len(self.down_block_types ) # down snake_case : List[Any] = [] snake_case : Optional[int] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): snake_case : List[Any] = output_channel snake_case : Dict = block_out_channels[i] snake_case : Optional[Any] = i == len(snake_case__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case : List[Any] = FlaxCrossAttnDownBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case : Union[str, Any] = FlaxDownBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(snake_case__ ) snake_case : Dict = down_blocks # mid snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up snake_case : Optional[Any] = [] snake_case : Optional[int] = list(reversed(snake_case__ ) ) snake_case : Dict = list(reversed(snake_case__ ) ) snake_case : Tuple = list(reversed(snake_case__ ) ) snake_case : Optional[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): snake_case : Optional[int] = output_channel snake_case : List[Any] = reversed_block_out_channels[i] snake_case : Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(snake_case__ ) - 1 )] snake_case : int = i == len(snake_case__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": snake_case : Any = FlaxCrossAttnUpBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case : Optional[int] = FlaxUpBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(snake_case__ ) snake_case : Optional[int] = output_channel snake_case : Tuple = up_blocks # out snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) snake_case : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : bool = True , snake_case__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(snake_case__ , jnp.ndarray ): snake_case : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case : Any = timesteps.astype(dtype=jnp.floataa ) snake_case : int = jnp.expand_dims(snake_case__ , 0 ) snake_case : str = self.time_proj(snake_case__ ) snake_case : str = self.time_embedding(snake_case__ ) # 2. pre-process snake_case : int = jnp.transpose(snake_case__ , (0, 2, 3, 1) ) snake_case : List[Any] = self.conv_in(snake_case__ ) # 3. down snake_case : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(snake_case__ , snake_case__ ): snake_case , snake_case : List[Any] = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train ) else: snake_case , snake_case : str = down_block(snake_case__ , snake_case__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: snake_case : Tuple = () for down_block_res_sample, down_block_additional_residual in zip( snake_case__ , snake_case__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) snake_case : Optional[int] = new_down_block_res_samples # 4. mid snake_case : Optional[int] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: snake_case : int = down_block_res_samples[-(self.layers_per_block + 1) :] snake_case : Optional[Any] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(snake_case__ , snake_case__ ): snake_case : Optional[Any] = up_block( snake_case__ , temb=snake_case__ , encoder_hidden_states=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train , ) else: snake_case : Dict = up_block(snake_case__ , temb=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train ) # 6. post-process snake_case : List[str] = self.conv_norm_out(snake_case__ ) snake_case : Any = nn.silu(snake_case__ ) snake_case : Optional[int] = self.conv_out(snake_case__ ) snake_case : Union[str, Any] = jnp.transpose(snake_case__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=snake_case__ )
59
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] =(IPNDMScheduler,) __lowerCamelCase : int =(('num_inference_steps', 50),) def UpperCamelCase_ ( self : str , **__lowercase : Dict ): '''simple docstring''' __a = {"""num_train_timesteps""": 1000} config.update(**__lowercase ) return config def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : str ): '''simple docstring''' pass def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals (must be after setting timesteps) __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residual (must be after setting timesteps) __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__lowercase ) for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample return sample def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) __a = self.dummy_sample __a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ): scheduler.set_timesteps(__lowercase ) elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ): __a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __a = dummy_past_residuals[:] __a = scheduler.timesteps[5] __a = scheduler.timesteps[6] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' __a = self.full_loop() __a = torch.mean(torch.abs(__lowercase ) ) assert abs(result_mean.item() - 2540529 ) < 10
302
0
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class snake_case_( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any=7 , UpperCamelCase_ : str=3 , UpperCamelCase_ : str=1_8 , UpperCamelCase_ : int=3_0 , UpperCamelCase_ : Union[str, Any]=4_0_0 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=True , ): lowerCAmelCase : Dict = size if size is not None else {'''height''': 1_8, '''width''': 1_8} lowerCAmelCase : List[Any] = parent lowerCAmelCase : List[str] = batch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Any = image_size lowerCAmelCase : List[str] = min_resolution lowerCAmelCase : Dict = max_resolution lowerCAmelCase : List[str] = do_resize lowerCAmelCase : Union[str, Any] = size lowerCAmelCase : str = do_normalize def lowerCamelCase__ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = ImageGPTImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[str] = ImageGPTImageProcessingTester(self ) @property def lowerCamelCase__ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''clusters''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) lowerCAmelCase : List[Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCamelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(UpperCamelCase_ , '''image_processor.json''' ) image_processor_first.to_json_file(UpperCamelCase_ ) lowerCAmelCase : Tuple = self.image_processing_class.from_json_file(UpperCamelCase_ ).to_dict() lowerCAmelCase : Tuple = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Any = self.image_processing_class.from_pretrained(UpperCamelCase_ ).to_dict() lowerCAmelCase : Optional[int] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , UpperCamelCase_ ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def lowerCamelCase__ ( self : Dict ): pass def _snake_case ( ): lowerCAmelCase : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) lowerCAmelCase : Tuple = Image.open(dataset[4]['''file'''] ) lowerCAmelCase : str = Image.open(dataset[5]['''file'''] ) lowerCAmelCase : Dict = [imagea, imagea] return images @require_vision @require_torch class snake_case_( unittest.TestCase ): @slow def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) lowerCAmelCase : Union[str, Any] = prepare_images() # test non-batched lowerCAmelCase : Optional[Any] = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) lowerCAmelCase : int = [3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCamelCase_ ) # test batched lowerCAmelCase : Dict = image_processing(UpperCamelCase_ , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) lowerCAmelCase : Union[str, Any] = [3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCamelCase_ )
60
from __future__ import annotations lowerCamelCase__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ): '''simple docstring''' __a = graph # mapping node to its parent in resulting breadth first tree __a = {} __a = source_vertex def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = {self.source_vertex} __a = None __a = [self.source_vertex] # first in first out queue while queue: __a = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__lowercase ) __a = vertex queue.append(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex __a = self.parent.get(__lowercase ) if target_vertex_parent is None: __a = ( F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(__lowercase ) return self.shortest_path(__lowercase ) + F"->{target_vertex}" if __name__ == "__main__": lowerCamelCase__ = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
302
0
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ): if attention_mask is None: UpperCAmelCase_ : List[str] = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: UpperCAmelCase_ : List[Any] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: UpperCAmelCase_ : Dict = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=__lowerCamelCase ) if decoder_head_mask is None: UpperCAmelCase_ : Dict = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=__lowerCamelCase ) if cross_attn_head_mask is None: UpperCAmelCase_ : Optional[Any] = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=__lowerCamelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=16 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="relu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , ): """simple docstring""" UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : Dict = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[Any] = intermediate_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Any = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = encoder_layerdrop UpperCAmelCase_ : Dict = decoder_layerdrop UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : Any = eos_token_id UpperCAmelCase_ : Optional[int] = pad_token_id UpperCAmelCase_ : Union[str, Any] = bos_token_id def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Optional[int] = self.eos_token_id # Eos Token UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input UpperCAmelCase_ : Optional[Any] = input_ids.clamp(self.pad_token_id + 1 ) UpperCAmelCase_ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 ) UpperCAmelCase_ : Any = self.get_config() UpperCAmelCase_ : List[Any] = prepare_mam_aaa_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) return config, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = MaMaaaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval() UpperCAmelCase_ : int = inputs_dict["input_ids"] UpperCAmelCase_ : Optional[Any] = inputs_dict["attention_mask"] UpperCAmelCase_ : Optional[int] = inputs_dict["head_mask"] # first forward pass UpperCAmelCase_ : Any = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : int = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(lowercase_ , attention_mask=lowercase_ )["last_hidden_state"] UpperCAmelCase_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[ "last_hidden_state" ] # select random slice UpperCAmelCase_ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-2 ) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = MaMaaaModel(config=lowercase_ ).to(lowercase_ ).eval() UpperCAmelCase_ : Tuple = model(**lowercase_ ) UpperCAmelCase_ : int = outputs.encoder_last_hidden_state UpperCAmelCase_ : Any = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : List[Any] = model.get_encoder() encoder.save_pretrained(lowercase_ ) UpperCAmelCase_ : Tuple = MaMaaaEncoder.from_pretrained(lowercase_ ).to(lowercase_ ) UpperCAmelCase_ : List[Any] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Optional[Any] = model.get_decoder() decoder.save_pretrained(lowercase_ ) UpperCAmelCase_ : List[Any] = MaMaaaDecoder.from_pretrained(lowercase_ ).to(lowercase_ ) UpperCAmelCase_ : str = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=lowercase_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Tuple = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : int = True SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Tuple = False def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = MaMaaaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = model_class(lowercase_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Any = model_class.from_pretrained(lowercase_ , output_loading_info=lowercase_ ) self.assertEqual(info["missing_keys"] , [] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): UpperCAmelCase_ : Any = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self._prepare_for_class(lowercase_ , lowercase_ ) ) if not self.is_encoder_decoder: UpperCAmelCase_ : Tuple = inputs["input_ids"] del inputs["input_ids"] else: UpperCAmelCase_ : Optional[Any] = inputs["input_ids"] UpperCAmelCase_ : List[str] = inputs.get("decoder_input_ids" , lowercase_ ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , lowercase_ ) UpperCAmelCase_ : List[str] = model.get_input_embeddings() if not self.is_encoder_decoder: UpperCAmelCase_ : Dict = wte(lowercase_ ) else: UpperCAmelCase_ : Dict = wte(lowercase_ ) UpperCAmelCase_ : Optional[int] = wte(lowercase_ ) with torch.no_grad(): model(**lowercase_ )[0] def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Union[str, Any] = input_dict["input_ids"] UpperCAmelCase_ : List[str] = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase_ : Tuple = MaMaaaForConditionalGeneration(lowercase_ ).eval().to(lowercase_ ) if torch_device == "cuda": model.half() model.generate(lowercase_ , attention_mask=lowercase_ ) model.generate(num_beams=4 , do_sample=lowercase_ , early_stopping=lowercase_ , num_return_sequences=3 ) def __a ( __lowerCamelCase ): return torch.tensor(__lowerCamelCase, dtype=torch.long, device=__lowerCamelCase ) _a = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class A_ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ ) UpperCAmelCase_ : str = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) UpperCAmelCase_ : str = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) UpperCAmelCase_ : str = prepare_mam_aaa_inputs_dict(model.config , lowercase_ , lowercase_ ) with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**lowercase_ )[0] UpperCAmelCase_ : Optional[int] = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , lowercase_ ) # change to expected output here UpperCAmelCase_ : List[Any] = torch.tensor( [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=lowercase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=lowercase_ ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ ) # change to intended input UpperCAmelCase_ : str = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) UpperCAmelCase_ : Optional[int] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) UpperCAmelCase_ : str = prepare_mam_aaa_inputs_dict(model.config , lowercase_ , lowercase_ ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowercase_ )[0] UpperCAmelCase_ : str = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , lowercase_ ) # change to expected output here UpperCAmelCase_ : Any = torch.tensor( [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=lowercase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=lowercase_ ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ ) UpperCAmelCase_ : Tuple = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) UpperCAmelCase_ : str = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams UpperCAmelCase_ : List[Any] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) UpperCAmelCase_ : Optional[int] = model.generate( input_ids=dct["input_ids"].to(lowercase_ ) , attention_mask=dct["attention_mask"].to(lowercase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) UpperCAmelCase_ : List[Any] = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] UpperCAmelCase_ : List[str] = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=lowercase_ , skip_special_tokens=lowercase_ ) assert generated == expected_en
61
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple =KandinskyVaaPriorPipeline __lowerCamelCase : Union[str, Any] =['prompt'] __lowerCamelCase : Any =['prompt', 'negative_prompt'] __lowerCamelCase : List[str] =[ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] __lowerCamelCase : List[Any] =False @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : Any ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return 100 @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__lowercase ) @property def UpperCamelCase_ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) __a = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } __a = PriorTransformer(**__lowercase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __a = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __a = CLIPVisionModelWithProjection(__lowercase ) return model @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = CLIPImageProcessor( crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , ) return image_processor def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.dummy_prior __a = self.dummy_image_encoder __a = self.dummy_text_encoder __a = self.dummy_tokenizer __a = self.dummy_image_processor __a = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , ) __a = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ): '''simple docstring''' if str(__lowercase ).startswith("""mps""" ): __a = torch.manual_seed(__lowercase ) else: __a = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __a = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = """cpu""" __a = self.get_dummy_components() __a = self.pipeline_class(**__lowercase ) __a = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __a = pipe(**self.get_dummy_inputs(__lowercase ) ) __a = output.image_embeds __a = pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] __a = image[0, -10:] __a = image_from_tuple[0, -10:] assert image.shape == (1, 32) __a = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = torch_device == """cpu""" __a = True __a = False self._test_inference_batch_single_identical( test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , ) @skip_mps def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = torch_device == """cpu""" __a = False self._test_attention_slicing_forward_pass( test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
302
0
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> int: __UpperCamelCase =parent __UpperCamelCase =batch_size __UpperCamelCase =seq_length __UpperCamelCase =is_training __UpperCamelCase =use_input_mask __UpperCamelCase =use_token_type_ids __UpperCamelCase =use_labels __UpperCamelCase =vocab_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_act __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =type_vocab_size __UpperCamelCase =type_sequence_label_size __UpperCamelCase =initializer_range __UpperCamelCase =num_labels __UpperCamelCase =num_choices __UpperCamelCase =scope def _a ( self ) -> Dict: __UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase =None if self.use_input_mask: __UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase =None if self.use_token_type_ids: __UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase =None __UpperCamelCase =None __UpperCamelCase =None if self.use_labels: __UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self ) -> int: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict: __UpperCamelCase =OpenLlamaModel(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ ) __UpperCamelCase =model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]: __UpperCamelCase =True __UpperCamelCase =OpenLlamaModel(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) __UpperCamelCase =model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Union[str, Any]: __UpperCamelCase =OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[Any]: __UpperCamelCase =True __UpperCamelCase =True __UpperCamelCase =OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) __UpperCamelCase =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase =torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0] __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0] # select random slice __UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCamelCase =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) ) def _a ( self ) -> List[str]: __UpperCamelCase =self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) =config_and_inputs __UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A_ , A_ , A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) UpperCAmelCase__ : List[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else () UpperCAmelCase__ : str = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[int] = False def _a ( self ) -> List[str]: __UpperCamelCase =OpenLlamaModelTester(self ) __UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 ) def _a ( self ) -> Tuple: self.config_tester.run_common_tests() def _a ( self ) -> Any: __UpperCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _a ( self ) -> str: __UpperCamelCase =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCamelCase =type self.model_tester.create_and_check_model(*A_ ) def _a ( self ) -> Dict: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =3 __UpperCamelCase =input_dict['input_ids'] __UpperCamelCase =input_ids.ne(1 ).to(A_ ) __UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase =OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> Tuple: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =3 __UpperCamelCase ='single_label_classification' __UpperCamelCase =input_dict['input_ids'] __UpperCamelCase =input_ids.ne(1 ).to(A_ ) __UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase =OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> Tuple: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =3 __UpperCamelCase ='multi_label_classification' __UpperCamelCase =input_dict['input_ids'] __UpperCamelCase =input_ids.ne(1 ).to(A_ ) __UpperCamelCase =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCamelCase =OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def _a ( self ) -> List[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =ids_tensor([1, 10] , config.vocab_size ) __UpperCamelCase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCamelCase =OpenLlamaModel(A_ ) original_model.to(A_ ) original_model.eval() __UpperCamelCase =original_model(A_ ).last_hidden_state __UpperCamelCase =original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCamelCase ={'type': scaling_type, 'factor': 10.0} __UpperCamelCase =OpenLlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() __UpperCamelCase =scaled_model(A_ ).last_hidden_state __UpperCamelCase =scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) )
62
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = Dict[str, Any] lowerCamelCase__ = List[Prediction] @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ): '''simple docstring''' super().__init__(*__lowercase , **__lowercase ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ): '''simple docstring''' __a = {} if "threshold" in kwargs: __a = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ): '''simple docstring''' return super().__call__(*__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : Tuple ): '''simple docstring''' __a = load_image(__lowercase ) __a = torch.IntTensor([[image.height, image.width]] ) __a = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: __a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) __a = target_size return inputs def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ): '''simple docstring''' __a = model_inputs.pop("""target_size""" ) __a = self.model(**__lowercase ) __a = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: __a = model_inputs["""bbox"""] return model_outputs def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ): '''simple docstring''' __a = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. __a , __a = target_size[0].tolist() def unnormalize(__lowercase : Optional[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) __a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) __a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )] __a = ["""score""", """label""", """box"""] __a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase ) __a = raw_annotations[0] __a = raw_annotation["""scores"""] __a = raw_annotation["""labels"""] __a = raw_annotation["""boxes"""] __a = scores.tolist() __a = [self.model.config.idalabel[label.item()] for label in labels] __a = [self._get_bounding_box(__lowercase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __a = ["""score""", """label""", """box"""] __a = [ dict(zip(__lowercase , __lowercase ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) __a , __a , __a , __a = box.int().tolist() __a = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
302
0
'''simple docstring''' def _lowerCamelCase ( lowercase : list[int] , lowercase : list[int] ) -> None: _a = len(lowercase ) print("The following activities are selected:" ) # The first activity is always selected _a = 0 print(lowercase , end="," ) # Consider rest of the activities for j in range(lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(lowercase , end="," ) _a = j if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ : Any = [1, 3, 0, 5, 8, 5] lowerCAmelCase_ : Optional[int] = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
63
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
302
0
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[float] ): """simple docstring""" _snake_case : int = 0.00 _snake_case : int = 0 for resistor in resistors: if resistor <= 0: _snake_case : Dict = F"Resistor at index {index} has a negative or zero value!" raise ValueError(snake_case__ ) first_sum += 1 / float(snake_case__ ) index += 1 return 1 / first_sum def UpperCAmelCase__ (snake_case__ : list[float] ): """simple docstring""" _snake_case : Union[str, Any] = 0.00 _snake_case : Any = 0 for resistor in resistors: sum_r += resistor if resistor < 0: _snake_case : Any = F"Resistor at index {index} has a negative value!" raise ValueError(snake_case__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
64
import random def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" __a , __a , __a = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None __a = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )] __a = 0 __a , __a , __a = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
302
0
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' if is_torch_version("<", "2.0.0" ) or not hasattr(__A, "_dynamo" ): return False return isinstance(__A, torch._dynamo.eval_frame.OptimizedModule ) def lowerCAmelCase_ ( __A, __A = True ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) UpperCAmelCase__ = is_compiled_module(__A ) if is_compiled: UpperCAmelCase__ = model UpperCAmelCase__ = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__A, __A ): UpperCAmelCase__ = model.module if not keep_fpaa_wrapper: UpperCAmelCase__ = getattr(__A, "forward" ) UpperCAmelCase__ = model.__dict__.pop("_original_forward", __A ) if original_forward is not None: while hasattr(__A, "__wrapped__" ): UpperCAmelCase__ = forward.__wrapped__ if forward == original_forward: break UpperCAmelCase__ = forward if getattr(__A, "_converted_to_transformer_engine", __A ): convert_model(__A, to_transformer_engine=__A ) if is_compiled: UpperCAmelCase__ = model UpperCAmelCase__ = compiled_model return model def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' PartialState().wait_for_everyone() def lowerCAmelCase_ ( __A, __A ) -> int: '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(__A, __A ) elif PartialState().local_process_index == 0: torch.save(__A, __A ) @contextmanager def lowerCAmelCase_ ( **__A ) -> Optional[int]: '''simple docstring''' for key, value in kwargs.items(): UpperCAmelCase__ = str(__A ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCAmelCase_ ( __A ) -> Tuple: '''simple docstring''' if not hasattr(__A, "__qualname__" ) and not hasattr(__A, "__name__" ): UpperCAmelCase__ = getattr(__A, "__class__", __A ) if hasattr(__A, "__qualname__" ): return obj.__qualname__ if hasattr(__A, "__name__" ): return obj.__name__ return str(__A ) def lowerCAmelCase_ ( __A, __A ) -> Tuple: '''simple docstring''' for key, value in source.items(): if isinstance(__A, __A ): UpperCAmelCase__ = destination.setdefault(__A, {} ) merge_dicts(__A, __A ) else: UpperCAmelCase__ = value return destination def lowerCAmelCase_ ( __A = None ) -> bool: '''simple docstring''' if port is None: UpperCAmelCase__ = 29_500 with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
65
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Optional[int] , **__lowercase : Dict ): '''simple docstring''' super().__init__(**__lowercase ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ): '''simple docstring''' return super().__call__(__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ): '''simple docstring''' __a = {} if "candidate_labels" in kwargs: __a = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __a = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ): '''simple docstring''' if isinstance(__lowercase , __lowercase ): if audio.startswith("""http://""" ) or audio.startswith("""https://""" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png __a = requests.get(__lowercase ).content else: with open(__lowercase , """rb""" ) as f: __a = f.read() if isinstance(__lowercase , __lowercase ): __a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate ) if not isinstance(__lowercase , np.ndarray ): raise ValueError("""We expect a numpy ndarray as input""" ) if len(audio.shape ) != 1: raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" ) __a = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" ) __a = candidate_labels __a = [hypothesis_template.format(__lowercase ) for x in candidate_labels] __a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase ) __a = [text_inputs] return inputs def UpperCamelCase_ ( self : Any , __lowercase : Any ): '''simple docstring''' __a = model_inputs.pop("""candidate_labels""" ) __a = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __lowercase ): __a = text_inputs[0] else: # Batching case. __a = text_inputs[0][0] __a = self.model(**__lowercase , **__lowercase ) __a = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_audio, } return model_outputs def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ): '''simple docstring''' __a = model_outputs.pop("""candidate_labels""" ) __a = model_outputs["""logits"""][0] if self.framework == "pt": __a = logits.softmax(dim=0 ) __a = probs.tolist() else: raise ValueError("""`tf` framework not supported.""" ) __a = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda __lowercase : -x[0] ) ] return result
302
0
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' def __init__( self: List[str] , snake_case: int=None , snake_case: str=None , *snake_case: Any , **snake_case: List[Any] ) -> Optional[int]: super().__init__(*snake_case , **snake_case ) if config is None: assert isinstance(self.model , snake_case ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f""" {self.model.__class__}""" ) snake_case_ :Optional[Any] = self.model.config else: snake_case_ :int = config snake_case_ :List[Any] = data_args snake_case_ :str = self.config.tgt_vocab_size if isinstance(self.config , snake_case ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for""" """ padding..""" ) if self.args.label_smoothing == 0: snake_case_ :Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case_ :Tuple = label_smoothed_nll_loss def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int ) -> str: if self.optimizer is None: snake_case_ :List[Any] = ["""bias""", """LayerNorm.weight"""] snake_case_ :Union[str, Any] = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] snake_case_ :str = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case_ :Optional[int] = Adafactor snake_case_ :Dict = {"""scale_parameter""": False, """relative_step""": False} else: snake_case_ :Optional[Any] = AdamW snake_case_ :Optional[Any] = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } snake_case_ :Any = self.args.learning_rate if self.sharded_ddp: snake_case_ :List[str] = OSS( params=snake_case , optim=snake_case , **snake_case , ) else: snake_case_ :Optional[int] = optimizer_cls(snake_case , **snake_case ) if self.lr_scheduler is None: snake_case_ :List[Any] = self._get_lr_scheduler(snake_case ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[Any] ) -> str: snake_case_ :int = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case_ :List[str] = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case_ :Any = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case_ :int = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case ) return scheduler def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: Optional[int] , snake_case: Union[str, Any] ) -> Optional[Any]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case_ :Union[str, Any] = model(**snake_case , use_cache=snake_case )[0] snake_case_ :Tuple = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case_, snake_case_ :Any = model(**snake_case , labels=snake_case , use_cache=snake_case )[:2] else: # compute label smoothed loss snake_case_ :List[Any] = model(**snake_case , use_cache=snake_case )[0] snake_case_ :Any = torch.nn.functional.log_softmax(snake_case , dim=-1 ) snake_case_, snake_case_ :List[str] = self.loss_fn(snake_case , snake_case , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def lowerCAmelCase_ ( self: str , snake_case: List[Any] , snake_case: List[Any] ) -> List[Any]: snake_case_ :int = inputs.pop("""labels""" ) snake_case_, snake_case_ :Any = self._compute_loss(snake_case , snake_case , snake_case ) return loss def lowerCAmelCase_ ( self: List[Any] , snake_case: nn.Module , snake_case: Dict[str, Union[torch.Tensor, Any]] , snake_case: bool , snake_case: Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: snake_case_ :Optional[int] = self._prepare_inputs(snake_case ) snake_case_ :Optional[int] = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case_ :Union[str, Any] = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **snake_case , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case_ :Optional[int] = self._pad_tensors_to_max_len(snake_case , gen_kwargs["""max_length"""] ) snake_case_ :str = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data snake_case_, snake_case_ :str = self._compute_loss(snake_case , snake_case , snake_case ) snake_case_ :Optional[int] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case_ :Optional[int] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case_ :List[Any] = self._pad_tensors_to_max_len(snake_case , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self: str , snake_case: List[Any] , snake_case: Optional[int] ) -> int: # If PAD token is not defined at least EOS token has to be defined snake_case_ :List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f""" padded to `max_length`={max_length}""" ) snake_case_ :List[str] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case_ :str = tensor return padded_tensor
66
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Dict =['pixel_values'] def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ): '''simple docstring''' super().__init__(**__lowercase ) __a = size if size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase ) __a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" ) __a = do_resize __a = do_rescale __a = do_normalize __a = do_center_crop __a = crop_size __a = size __a = resample __a = rescale_factor __a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "shortest_edge" in size: __a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __a = (size["""height"""], size["""width"""]) else: raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" ) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" ) return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ): '''simple docstring''' return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ): '''simple docstring''' return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ): '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = do_rescale if do_rescale is not None else self.do_rescale __a = do_normalize if do_normalize is not None else self.do_normalize __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase ) __a = resample if resample is not None else self.resample __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = size if size is not None else self.size __a = get_size_dict(__lowercase ) if not is_batched(__lowercase ): __a = [images] if not valid_images(__lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __a = [to_numpy_array(__lowercase ) for image in images] if do_resize: __a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] if do_center_crop: __a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images] if do_rescale: __a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: __a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] __a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __a = {"""pixel_values""": images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
302
0
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __UpperCAmelCase =logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase__=None , UpperCamelCase__=None ) -> int: return field(default_factory=lambda: default , metadata=UpperCamelCase__ ) @dataclass class a__ : lowerCamelCase : List[str] =list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) lowerCamelCase : List[int] =list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) lowerCamelCase : List[int] =list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." ) } , ) lowerCamelCase : str =field( default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) lowerCamelCase : str =field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) lowerCamelCase : str =field( default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) lowerCamelCase : str =field( default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) lowerCamelCase : str =field( default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) lowerCamelCase : str =field( default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , a , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
67
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoTokenizer.from_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = tokenizer("""This is me""" , return_tensors="""pt""" ) __a = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __a = model.generate(**__lowercase ) __a = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __a = model_reloaded.generate(**__lowercase ) self.assertTrue(torch.allclose(__lowercase , __lowercase ) ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowercase ): model.save_pretrained(__lowercase ) __a = model.reverse_bettertransformer() model.save_pretrained(__lowercase )
302
0
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = (PNDMScheduler,) __lowerCamelCase = (('num_inference_steps', 50),) def UpperCamelCase ( self , **lowercase ) -> Tuple: '''simple docstring''' A__ = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase ) return config def UpperCamelCase ( self , lowercase=0 , **lowercase ) -> Any: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("num_inference_steps" , lowercase ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase ) A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals A__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) A__ = scheduler_class.from_pretrained(lowercase ) new_scheduler.set_timesteps(lowercase ) # copy over dummy past residuals A__ = dummy_past_residuals[:] A__ = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A__ = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def UpperCamelCase ( self , lowercase=0 , **lowercase ) -> Tuple: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("num_inference_steps" , lowercase ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) A__ = scheduler_class.from_pretrained(lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[:] A__ = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A__ = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCamelCase ( self , **lowercase ) -> Dict: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase ) A__ = scheduler_class(**lowercase ) A__ = 10 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.prk_timesteps ): A__ = model(lowercase , lowercase ) A__ = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A__ = model(lowercase , lowercase ) A__ = scheduler.step_plms(lowercase , lowercase , lowercase ).prev_sample return sample def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("num_inference_steps" , lowercase ) for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) A__ = self.dummy_sample A__ = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase , "set_timesteps" ): scheduler.set_timesteps(lowercase ) elif num_inference_steps is not None and not hasattr(lowercase , "set_timesteps" ): A__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A__ = dummy_past_residuals[:] A__ = scheduler.step_prk(lowercase , 0 , lowercase , **lowercase ).prev_sample A__ = scheduler.step_prk(lowercase , 1 , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A__ = scheduler.step_plms(lowercase , 0 , lowercase , **lowercase ).prev_sample A__ = scheduler.step_plms(lowercase , 1 , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(steps_offset=1 ) A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=lowercase , beta_end=lowercase ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = 27 for scheduler_class in self.scheduler_classes: A__ = self.dummy_sample A__ = 0.1 * sample A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A__ = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowercase ): A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.full_loop() A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.full_loop(prediction_type="v_prediction" ) A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 ) A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 ) A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
68
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] ='albert' def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = embedding_size __a = hidden_size __a = num_hidden_layers __a = num_hidden_groups __a = num_attention_heads __a = inner_group_num __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout_prob __a = position_embedding_type class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
302
0
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class UpperCamelCase : pass
69
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) _lowerCAmelCase = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(lowerCAmelCase ) DownloadCommand.register_subcommand(lowerCAmelCase ) EnvironmentCommand.register_subcommand(lowerCAmelCase ) RunCommand.register_subcommand(lowerCAmelCase ) ServeCommand.register_subcommand(lowerCAmelCase ) UserCommands.register_subcommand(lowerCAmelCase ) AddNewModelCommand.register_subcommand(lowerCAmelCase ) AddNewModelLikeCommand.register_subcommand(lowerCAmelCase ) LfsCommands.register_subcommand(lowerCAmelCase ) PTtoTFCommand.register_subcommand(lowerCAmelCase ) # Let's go _lowerCAmelCase = parser.parse_args() if not hasattr(lowerCAmelCase , """func""" ): parser.print_help() exit(1 ) # Run _lowerCAmelCase = args.func(lowerCAmelCase ) service.run() if __name__ == "__main__": main()
70
class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = val __a = None __a = None def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: __a = Node(__lowercase ) else: self.left.insert(__lowercase ) elif val > self.val: if self.right is None: __a = Node(__lowercase ) else: self.right.insert(__lowercase ) else: __a = val def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if root: inorder(root.left , _SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , _SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return arr __a = Node(arr[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. __a = [] inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
302
0
from __future__ import annotations import typing from collections import Counter def A ( a_ ) -> typing.Counter[int]: __UpperCamelCase : typing.Counter[int] =Counter() for base in range(1 ,max_perimeter + 1 ): for perpendicular in range(a_ ,max_perimeter + 1 ): __UpperCamelCase : List[Any] =(base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(a_ ): __UpperCamelCase : Tuple =int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def A ( a_ = 1_000 ) -> int: __UpperCamelCase : Dict =pythagorean_triple(a_ ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(f"Perimeter {solution()} has maximum solutions")
71
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) ) class SCREAMING_SNAKE_CASE : def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = make_divisible(512 * width_multiplier , divisor=8 ) __a = hidden_act __a = conv_kernel_size __a = output_stride __a = classifier_dropout_prob __a = use_labels __a = is_training __a = num_labels __a = initializer_range __a = scope __a = width_multiplier __a = ffn_dropout __a = attn_dropout def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ): '''simple docstring''' __a = MobileViTVaModel(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[Any] =( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __lowerCamelCase : Any =( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCamelCase : Dict =False __lowerCamelCase : Optional[Any] =False __lowerCamelCase : int =False __lowerCamelCase : Any =False def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = MobileViTVaModelTester(self ) __a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def UpperCamelCase_ ( self : int ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__lowercase ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ): __a = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __a = outputs.hidden_states __a = 5 self.assertEqual(len(__lowercase ) , __lowercase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __a = 2 for i in range(len(__lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = MobileViTVaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase__ ( ): """simple docstring""" __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( __lowercase ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowercase ) __a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits # verify the logits __a = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) __a = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits.detach().cpu() __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) __a = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase ) __a = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
302
0
"""simple docstring""" # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''T''') class __snake_case ( Generic[T]): def __init__( self : Tuple , __lowerCAmelCase : bool = True ): """simple docstring""" _lowerCamelCase : dict[T, list[T]] = {} # dictionary of lists _lowerCamelCase : Any = directed def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : T ): """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) self.adj_list[destination_vertex].append(__lowerCAmelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) _lowerCamelCase : List[Any] = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _lowerCamelCase : Any = [destination_vertex] _lowerCamelCase : Optional[int] = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _lowerCamelCase : List[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _lowerCamelCase : Any = [destination_vertex] _lowerCamelCase : List[str] = [] return self def __repr__( self : Dict ): """simple docstring""" return pformat(self.adj_list )
72
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
302
0
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[int]: try: __lowerCamelCase : int = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __lowerCamelCase : List[Any] = default else: # KEY is set, convert it to True or False. try: __lowerCamelCase : Any = strtobool(lowerCamelCase__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value a =parse_flag_from_env("""RUN_SLOW""", default=False) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skip('Test was skipped' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]: return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]: return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]: return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]: return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=None , lowerCamelCase__=None ) -> Any: if test_case is None: return partial(lowerCamelCase__ , version=lowerCamelCase__ ) return unittest.skipUnless(is_torch_version('>=' , lowerCamelCase__ ) , F"test requires torch version >= {version}" )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase__ ) a =( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase__ ) class A_ ( unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = True @classmethod def lowerCAmelCase ( cls : int): __lowerCamelCase : List[Any] = tempfile.mkdtemp() @classmethod def lowerCAmelCase ( cls : int): if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def lowerCAmelCase ( self : Any): if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(SCREAMING_SNAKE_CASE__) class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : List[Any]): super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[mock.Mock, List[mock.Mock]]): __lowerCamelCase : Tuple = mocks if isinstance(SCREAMING_SNAKE_CASE__ ,(tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: __lowerCamelCase : int = AcceleratorState() __lowerCamelCase : Optional[int] = tensor[None].clone().to(state.device ) __lowerCamelCase : Dict = gather(lowerCamelCase__ ).cpu() __lowerCamelCase : Union[str, Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , lowerCamelCase__ ): return False return True class A_ : def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Union[str, Any] = returncode __lowerCamelCase : List[str] = stdout __lowerCamelCase : Tuple = stderr async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: while True: __lowerCamelCase : str = await stream.readline() if line: callback(lowerCamelCase__ ) else: break async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False ) -> _RunOutput: if echo: print('\nRunning: ' , ' '.join(lowerCamelCase__ ) ) __lowerCamelCase : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __lowerCamelCase : int = [] __lowerCamelCase : str = [] def tee(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="" ): __lowerCamelCase : Any = line.decode('utf-8' ).rstrip() sink.append(lowerCamelCase__ ) if not quiet: print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label='stderr:' ) ) ), ] , timeout=lowerCamelCase__ , ) return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=1_8_0 , lowerCamelCase__=False , lowerCamelCase__=True ) -> _RunOutput: __lowerCamelCase : Union[str, Any] = asyncio.get_event_loop() __lowerCamelCase : str = loop.run_until_complete( _stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) ) __lowerCamelCase : Union[str, Any] = ' '.join(lowerCamelCase__ ) if result.returncode > 0: __lowerCamelCase : Optional[int] = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class A_ ( SCREAMING_SNAKE_CASE ): pass def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> int: try: __lowerCamelCase : List[str] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(lowerCamelCase__ , 'decode' ): __lowerCamelCase : str = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}" ) from e
73
import string import numpy def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : __lowerCamelCase : List[str] =string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase : List[Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 ) __lowerCamelCase : Optional[Any] =numpy.vectorize(lowerCamelCase__ ) def __init__( self : Union[str, Any] , __lowercase : numpy.ndarray ): '''simple docstring''' __a = self.modulus(__lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __a = encrypt_key.shape[0] def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' return self.key_string.index(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : int ): '''simple docstring''' return self.key_string[round(__lowercase )] def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = len(self.key_string ) if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1: __a = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' __a = [char for char in text.upper() if char in self.key_string] __a = chars[-1] while len(__lowercase ) % self.break_key != 0: chars.append(__lowercase ) return "".join(__lowercase ) def UpperCamelCase_ ( self : List[str] , __lowercase : str ): '''simple docstring''' __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[ 0 ] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __a = i break __a = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowercase ) ) def UpperCamelCase_ ( self : Any , __lowercase : str ): '''simple docstring''' __a = self.make_decrypt_key() __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase__ ( ): """simple docstring""" __a = int(input("""Enter the order of the encryption key: """ ) ) __a = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(_SCREAMING_SNAKE_CASE ): __a = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) __a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) __a = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": __a = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": __a = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
"""simple docstring""" def _snake_case ( snake_case__ : List[str] ): A = 1 A = 2 while i * i <= n: A = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def _snake_case ( ): A = 1 A = 1 while True: i += 1 t_num += i if count_divisors(snake_case__ ) > 500: break return t_num if __name__ == "__main__": print(solution())
74
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] ='autoformer' __lowerCamelCase : str ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ): '''simple docstring''' # time series specific configuration __a = prediction_length __a = context_length if context_length is not None else prediction_length __a = distribution_output __a = loss __a = input_size __a = num_time_features __a = lags_sequence __a = scaling __a = num_dynamic_real_features __a = num_static_real_features __a = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) __a = cardinality else: __a = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) __a = embedding_dimension else: __a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __a = num_parallel_samples # Transformer architecture configuration __a = input_size * len(self.lags_sequence ) + self._number_of_features __a = d_model __a = encoder_attention_heads __a = decoder_attention_heads __a = encoder_ffn_dim __a = decoder_ffn_dim __a = encoder_layers __a = decoder_layers __a = dropout __a = attention_dropout __a = activation_dropout __a = encoder_layerdrop __a = decoder_layerdrop __a = activation_function __a = init_std __a = use_cache # Autoformer __a = label_length __a = moving_average __a = autocorrelation_factor super().__init__(is_encoder_decoder=__lowercase , **__lowercase ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
302
0
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =13 lowerCamelCase_ =7 lowerCamelCase_ =True lowerCamelCase_ =True lowerCamelCase_ =True lowerCamelCase_ =True lowerCamelCase_ =99 lowerCamelCase_ =384 lowerCamelCase_ =2 lowerCamelCase_ =4 lowerCamelCase_ =37 lowerCamelCase_ ='''gelu''' lowerCamelCase_ =0.1 lowerCamelCase_ =0.1 lowerCamelCase_ =512 lowerCamelCase_ =16 lowerCamelCase_ =2 lowerCamelCase_ =0.0_2 lowerCamelCase_ =3 lowerCamelCase_ =4 lowerCamelCase_ =128 lowerCamelCase_ =2 lowerCamelCase_ =9 lowerCamelCase_ =1 lowerCamelCase_ =None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =None if self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=lowerCAmelCase, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =TFConvBertModel(config=lowerCAmelCase ) lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowerCamelCase_ =[input_ids, input_mask] lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =TFConvBertForMaskedLM(config=lowerCAmelCase ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ =TFConvBertForSequenceClassification(config=lowerCAmelCase ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =TFConvBertForMultipleChoice(config=lowerCAmelCase ) lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) ) lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) ) lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) ) lowerCamelCase_ ={ '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ =TFConvBertForTokenClassification(config=lowerCAmelCase ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =TFConvBertForQuestionAnswering(config=lowerCAmelCase ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Any =( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowercase : List[str] =( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowercase : int =False lowercase : List[Any] =False lowercase : Dict =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFConvBertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =True lowerCamelCase_ =True if hasattr(lowerCAmelCase, '''use_cache''' ): lowerCamelCase_ =True lowerCamelCase_ =getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length ) lowerCamelCase_ =getattr(self.model_tester, '''key_length''', lowerCAmelCase ) for model_class in self.all_model_classes: lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =len(model(lowerCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase, saved_model=lowerCAmelCase ) lowerCamelCase_ =os.path.join(lowerCAmelCase, '''saved_model''', '''1''' ) lowerCamelCase_ =tf.keras.models.load_model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) if self.is_encoder_decoder: lowerCamelCase_ =outputs['''encoder_hidden_states'''] lowerCamelCase_ =outputs['''encoder_attentions'''] else: lowerCamelCase_ =outputs['''hidden_states'''] lowerCamelCase_ =outputs['''attentions'''] self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase ) lowerCamelCase_ =getattr( self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) self.assertIsNotNone(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =True lowerCamelCase_ =getattr(self.model_tester, '''decoder_seq_length''', self.model_tester.seq_length ) lowerCamelCase_ =getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length ) lowerCamelCase_ =getattr(self.model_tester, '''key_length''', lowerCAmelCase ) lowerCamelCase_ =getattr(self.model_tester, '''key_length''', lowerCAmelCase ) def check_decoder_attentions_output(lowerCAmelCase ): lowerCamelCase_ =len(lowerCAmelCase ) self.assertEqual(out_len % 2, 0 ) lowerCamelCase_ =outputs.decoder_attentions self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(lowerCAmelCase ): lowerCamelCase_ =[ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: lowerCamelCase_ =True lowerCamelCase_ =False lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) lowerCamelCase_ =len(lowerCAmelCase ) self.assertEqual(config.output_hidden_states, lowerCAmelCase ) check_encoder_attentions_output(lowerCAmelCase ) if self.is_encoder_decoder: lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states, lowerCAmelCase ) check_decoder_attentions_output(lowerCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase_ =True lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states, lowerCAmelCase ) check_encoder_attentions_output(lowerCAmelCase ) # Check attention is always last and order is fine lowerCamelCase_ =True lowerCamelCase_ =True lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =model(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(lowerCAmelCase ) ) self.assertEqual(model.config.output_hidden_states, lowerCAmelCase ) check_encoder_attentions_output(lowerCAmelCase ) @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) lowerCamelCase_ =tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =[1, 6, 768] self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =tf.constant( [ [ [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2], [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4], [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4], ] ] ) tf.debugging.assert_near(output[:, :3, :3], lowerCAmelCase, atol=1e-4 )
75
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a_ = logging.getLogger(__name__) class _UpperCamelCase ( __A ): '''simple docstring''' def __init__( self : int , a : Optional[Any]=-1 ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = label_idx def __UpperCamelCase ( self : List[str] , a : Union[str, Any] , a : Union[Split, str] ) -> List[InputExample]: """simple docstring""" if isinstance(a , a ): SCREAMING_SNAKE_CASE : Any = mode.value SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(a , F"{mode}.txt" ) SCREAMING_SNAKE_CASE : List[str] = 1 SCREAMING_SNAKE_CASE : int = [] with open(a , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : int = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=a , labels=a ) ) guid_index += 1 SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Optional[int] = [] else: SCREAMING_SNAKE_CASE : Optional[Any] = line.split(" " ) words.append(splits[0] ) if len(a ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=a , labels=a ) ) return examples def __UpperCamelCase ( self : Optional[Any] , a : TextIO , a : TextIO , a : List ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : int = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(a ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: SCREAMING_SNAKE_CASE : str = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(a ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def __UpperCamelCase ( self : Dict , a : str ) -> List[str]: """simple docstring""" if path: with open(a , "r" ) as f: SCREAMING_SNAKE_CASE : List[str] = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE : Dict = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _UpperCamelCase ( __A ): '''simple docstring''' def __init__( self : List[Any] ) -> Dict: """simple docstring""" super().__init__(label_idx=-2 ) def __UpperCamelCase ( self : Optional[Any] , a : str ) -> List[str]: """simple docstring""" if path: with open(a , "r" ) as f: SCREAMING_SNAKE_CASE : Optional[Any] = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE : List[Any] = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _UpperCamelCase ( __A ): '''simple docstring''' def __UpperCamelCase ( self : str , a : str , a : Union[Split, str] ) -> List[InputExample]: """simple docstring""" if isinstance(a , a ): SCREAMING_SNAKE_CASE : Dict = mode.value SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(a , F"{mode}.txt" ) SCREAMING_SNAKE_CASE : Any = 1 SCREAMING_SNAKE_CASE : List[str] = [] with open(a , encoding="utf-8" ) as f: for sentence in parse_incr(a ): SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Tuple = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(a ) == len(a ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=a , labels=a ) ) guid_index += 1 return examples def __UpperCamelCase ( self : int , a : TextIO , a : TextIO , a : List ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = 0 for sentence in parse_incr(a ): SCREAMING_SNAKE_CASE : Optional[Any] = preds_list[example_id] SCREAMING_SNAKE_CASE : Optional[int] = "" for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(a ) example_id += 1 def __UpperCamelCase ( self : List[Any] , a : str ) -> List[str]: """simple docstring""" if path: with open(a , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
76
from __future__ import annotations lowerCamelCase__ = """#""" class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] ): '''simple docstring''' __a = {} def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ): '''simple docstring''' __a = self._trie for char in text: if char not in trie: __a = {} __a = trie[char] __a = True def UpperCamelCase_ ( self : Tuple , __lowercase : str ): '''simple docstring''' __a = self._trie for char in prefix: if char in trie: __a = trie[char] else: return [] return self._elements(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ): '''simple docstring''' __a = [] for c, v in d.items(): __a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )] result.extend(__lowercase ) return tuple(__lowercase ) lowerCamelCase__ = Trie() lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" __a = trie.find_word(_SCREAMING_SNAKE_CASE ) return tuple(string + word for word in suffixes ) def lowerCAmelCase__ ( ): """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
"""simple docstring""" from __future__ import annotations import math def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool , _lowerCAmelCase : list[int] , _lowerCAmelCase : float ): '''simple docstring''' if depth < 0: raise ValueError('Depth cannot be less than 0' ) if not scores: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , ) ) def a_ ( ): '''simple docstring''' lowercase__ : str = [90, 23, 6, 33, 21, 65, 123, 3_4423] lowercase__ : Tuple = math.log(len(_lowerCAmelCase ) , 2 ) print(f"""Optimal value : {minimax(0 , 0 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
77
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = num_attention_heads __a = attention_head_dim __a = num_attention_heads * attention_head_dim __a = additional_embeddings __a = time_embed_dim or inner_dim __a = embedding_proj_dim or embedding_dim __a = clip_embed_dim or embedding_dim __a = Timesteps(__lowercase , __lowercase , 0 ) __a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) if embedding_proj_norm_type is None: __a = None elif embedding_proj_norm_type == "layer": __a = nn.LayerNorm(__lowercase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) __a = nn.Linear(__lowercase , __lowercase ) if encoder_hid_proj_type is None: __a = None elif encoder_hid_proj_type == "linear": __a = nn.Linear(__lowercase , __lowercase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) __a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) ) if added_emb_type == "prd": __a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) ) elif added_emb_type is None: __a = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) __a = nn.ModuleList( [ BasicTransformerBlock( __lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , ) for d in range(__lowercase ) ] ) if norm_in_type == "layer": __a = nn.LayerNorm(__lowercase ) elif norm_in_type is None: __a = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) __a = nn.LayerNorm(__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) __a = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) __a = causal_attention_mask[None, ...] self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = {} def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ): if hasattr(__lowercase , """set_processor""" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowercase , __lowercase , __lowercase ) return processors def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __a = len(self.attn_processors.keys() ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ): if hasattr(__lowercase , """set_processor""" ): if not isinstance(__lowercase , __lowercase ): module.set_processor(__lowercase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ): '''simple docstring''' __a = hidden_states.shape[0] __a = timestep if not torch.is_tensor(__lowercase ): __a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0: __a = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device ) __a = self.time_proj(__lowercase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. __a = timesteps_projected.to(dtype=self.dtype ) __a = self.time_embedding(__lowercase ) if self.embedding_proj_norm is not None: __a = self.embedding_proj_norm(__lowercase ) __a = self.embedding_proj(__lowercase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __a = self.encoder_hidden_states_proj(__lowercase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) __a = self.proj_in(__lowercase ) __a = self.positional_embedding.to(hidden_states.dtype ) __a = [] __a = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowercase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __a = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __a = hidden_states[:, None, :] __a = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 ) additional_embeds.append(__lowercase ) __a = torch.cat( __lowercase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __a = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __a = F.pad( __lowercase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __a = hidden_states + positional_embeddings if attention_mask is not None: __a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 __a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 ) __a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __a = self.norm_in(__lowercase ) for block in self.transformer_blocks: __a = block(__lowercase , attention_mask=__lowercase ) __a = self.norm_out(__lowercase ) if self.prd_embedding is not None: __a = hidden_states[:, -1] else: __a = hidden_states[:, additional_embeddings_len:] __a = self.proj_to_clip_embeddings(__lowercase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple ): '''simple docstring''' __a = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
302
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case_ = { """configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""], """tokenization_biogpt""": ["""BioGptTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BioGptForCausalLM""", """BioGptForTokenClassification""", """BioGptForSequenceClassification""", """BioGptModel""", """BioGptPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
from functools import lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 __a = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_SCREAMING_SNAKE_CASE ) if n > 1: factors.add(_SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ): """simple docstring""" return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 while True: # Increment each value of a generated range __a = [base + i for i in range(_SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. __a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group] checker.append(_SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(_SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ): """simple docstring""" __a = run(_SCREAMING_SNAKE_CASE ) return results[0] if len(_SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
302
0
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : int=30 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Any=32 , __UpperCAmelCase : int=5 , __UpperCAmelCase : int=4 , __UpperCAmelCase : List[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Union[str, Any]=0.6 , __UpperCAmelCase : Any=None , ): '''simple docstring''' _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = mask_ratio _A = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ): '''simple docstring''' _A = ViTMAEModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] ): '''simple docstring''' _A = ViTMAEForPreTraining(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _A = model(__UpperCAmelCase ) _A = (self.image_size // self.patch_size) ** 2 _A = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _A = 1 _A = ViTMAEForPreTraining(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(__UpperCAmelCase ) _A = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () snake_case = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} snake_case = False snake_case = False snake_case = False snake_case = False def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = ViTMAEModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' pass def lowerCAmelCase ( self : int ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ): '''simple docstring''' np.random.seed(2 ) _A = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _A = torch.from_numpy(__UpperCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _A = pt_noise super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _A = outputs[0].cpu().numpy() _A = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) _A = model_class.from_pretrained(__UpperCAmelCase ) model.to(__UpperCAmelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) # Make sure we don't have nans _A = after_outputs[0].cpu().numpy() _A = 0 _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1E-5 ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowerCAmelCase ( self : Any ): '''simple docstring''' pass @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = ViTMAEModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __lowercase ( ) -> Optional[int]: '''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase ( self : Any ): '''simple docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' np.random.seed(2 ) _A = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCAmelCase ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _A = ViTMAEConfig() _A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _A = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _A = model(**__UpperCAmelCase , noise=torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ) ) # verify the logits _A = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _A = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCAmelCase ) , atol=1E-4 ) )
79
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" __a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __a = 128 elif "12-12" in model_name: __a = 12 __a = 12 elif "14-14" in model_name: __a = 14 __a = 14 elif "16-16" in model_name: __a = 16 __a = 16 else: raise ValueError("""Model not supported""" ) __a = """huggingface/label-files""" if "speech-commands" in model_name: __a = 35 __a = """speech-commands-v2-id2label.json""" else: __a = 527 __a = """audioset-id2label.json""" __a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) __a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if "module.v" in name: __a = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: __a = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: __a = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: __a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __a = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: __a = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __a = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __a = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __a = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __a = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __a = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: __a = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): __a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: __a = key.split(""".""" ) __a = int(key_split[3] ) __a = config.hidden_size if "weight" in key: __a = val[:dim, :] __a = val[dim : dim * 2, :] __a = val[-dim:, :] else: __a = val[:dim] __a = val[dim : dim * 2] __a = val[-dim:] else: __a = val return orig_state_dict def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ): """simple docstring""" __a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE ) __a = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __a = model_name_to_url[model_name] __a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # remove some keys remove_keys(_SCREAMING_SNAKE_CASE ) # rename some keys __a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load 🤗 model __a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978 __a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526 __a = 1024 if """speech-commands""" not in model_name else 128 __a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) if "speech-commands" in model_name: __a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) __a = dataset[0]["""audio"""]["""array"""] else: __a = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) __a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE ) __a = waveform.squeeze().numpy() __a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" ) # forward pass __a = model(**_SCREAMING_SNAKE_CASE ) __a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __a = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __a = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __a = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __a = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __a = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __a = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __a = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __a = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f"MIT/{model_name}" ) feature_extractor.push_to_hub(f"MIT/{model_name}" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
302
0
'''simple docstring''' def _UpperCamelCase ( __A , __A ) -> Dict: '''simple docstring''' UpperCamelCase__ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _UpperCamelCase ( __A , __A , __A ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ = 0 while b > 0: if b & 1: UpperCamelCase__ = ((res % c) + (a % c)) % c a += a b >>= 1 return res
80
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } lowerCamelCase__ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } lowerCamelCase__ = """▁""" class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] =VOCAB_FILES_NAMES __lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any =AlbertTokenizer def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. __a = ( AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token ) super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , ) __a = do_lower_case __a = remove_space __a = keep_accents __a = vocab_file __a = False if not self.vocab_file else True def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __a = os.path.join( __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
302
0
"""simple docstring""" def _A ( lowercase = 50 ): """simple docstring""" a =[1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F'{solution() = }')
81
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] =(IPNDMScheduler,) __lowerCamelCase : int =(('num_inference_steps', 50),) def UpperCamelCase_ ( self : str , **__lowercase : Dict ): '''simple docstring''' __a = {"""num_train_timesteps""": 1000} config.update(**__lowercase ) return config def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : str ): '''simple docstring''' pass def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) scheduler.set_timesteps(__lowercase ) # copy over dummy past residuals (must be after setting timesteps) __a = dummy_past_residuals[:] if time_step is None: __a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowercase ) __a = scheduler_class.from_pretrained(__lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowercase ) # copy over dummy past residual (must be after setting timesteps) __a = dummy_past_residuals[:] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__lowercase ) __a = scheduler_class(**__lowercase ) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__lowercase ) for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample for i, t in enumerate(scheduler.timesteps ): __a = model(__lowercase , __lowercase ) __a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample return sample def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __lowercase ) for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__lowercase ) __a = self.dummy_sample __a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ): scheduler.set_timesteps(__lowercase ) elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ): __a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __a = dummy_past_residuals[:] __a = scheduler.timesteps[5] __a = scheduler.timesteps[6] __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample __a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' __a = self.full_loop() __a = torch.mean(torch.abs(__lowercase ) ) assert abs(result_mean.item() - 2540529 ) < 10
302
0
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class __lowerCAmelCase ( lowerCamelCase__ ): def __init__( self , _snake_case , _snake_case , _snake_case=1024 , _snake_case=1024 , _snake_case=3.6 ): """simple docstring""" _lowerCAmelCase = tokenizer _lowerCAmelCase = tokenizer.bos_token_id _lowerCAmelCase = dataset _lowerCAmelCase = seq_length _lowerCAmelCase = seq_length * chars_per_token * num_of_sequences def __iter__( self ): """simple docstring""" _lowerCAmelCase = iter(self.dataset ) _lowerCAmelCase = True while more_examples: _lowerCAmelCase , _lowerCAmelCase = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(_snake_case )["""content"""] ) buffer_len += len(buffer[-1] ) except StopIteration: _lowerCAmelCase = False break _lowerCAmelCase = tokenizer(_snake_case , truncation=_snake_case )["""input_ids"""] _lowerCAmelCase = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(_snake_case ) , self.seq_length ): _lowerCAmelCase = all_token_ids[i : i + self.seq_length] if len(_snake_case ) == self.seq_length: yield torch.tensor(_snake_case ) def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = {"""streaming""": True} _lowerCAmelCase = load_dataset(args.dataset_name , split="""train""" , **snake_case ) _lowerCAmelCase = ConstantLengthDataset(snake_case , snake_case , seq_length=args.seq_length ) _lowerCAmelCase = DataLoader(snake_case , batch_size=args.batch_size ) return eval_dataloader def _UpperCAmelCase ( snake_case ): """simple docstring""" model.eval() _lowerCAmelCase = [] for step, batch in enumerate(snake_case ): with torch.no_grad(): _lowerCAmelCase = model(snake_case , labels=snake_case ) _lowerCAmelCase = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(snake_case ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break _lowerCAmelCase = torch.mean(torch.cat(snake_case ) ) try: _lowerCAmelCase = torch.exp(snake_case ) except OverflowError: _lowerCAmelCase = float("""inf""" ) return loss.item(), perplexity.item() # Setup Accelerator A__ = Accelerator() # Parse configuration A__ = HfArgumentParser(EvaluationArguments) A__ = parser.parse_args() set_seed(args.seed) # Logging A__ = logging.getLogger(__name__) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) # Load model and tokenizer A__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) A__ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader A__ = create_dataloader(args) # Prepare everything with our `accelerator`. A__ , A__ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("""Evaluating and saving model after training""") A__ , A__ = evaluate(args) logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
82
from __future__ import annotations lowerCamelCase__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ): '''simple docstring''' __a = graph # mapping node to its parent in resulting breadth first tree __a = {} __a = source_vertex def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = {self.source_vertex} __a = None __a = [self.source_vertex] # first in first out queue while queue: __a = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__lowercase ) __a = vertex queue.append(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex __a = self.parent.get(__lowercase ) if target_vertex_parent is None: __a = ( F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(__lowercase ) return self.shortest_path(__lowercase ) + F"->{target_vertex}" if __name__ == "__main__": lowerCamelCase__ = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
302
0
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip snake_case_ : Dict = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def A__ ( UpperCAmelCase_ ): if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): return max(metric_fn(UpperCAmelCase_ , UpperCAmelCase_ ) for gt in ground_truths ) def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : int = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()] _UpperCamelCase : Union[str, Any] = [] if args.gold_data_mode == "qa": _UpperCamelCase : List[str] = pd.read_csv(UpperCAmelCase_ , sep='\t' , header=UpperCAmelCase_ ) for answer_list in data[1]: _UpperCamelCase : Any = ast.literal_eval(UpperCAmelCase_ ) answers.append(UpperCAmelCase_ ) else: _UpperCamelCase : Optional[int] = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()] _UpperCamelCase : Union[str, Any] = [[reference] for reference in references] _UpperCamelCase : List[str] = 0 for prediction, ground_truths in zip(UpperCAmelCase_ , UpperCAmelCase_ ): total += 1 em += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) fa += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Dict = 100.0 * em / total _UpperCamelCase : int = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : Optional[int] = args.k _UpperCamelCase : Dict = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()] _UpperCamelCase : str = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()] _UpperCamelCase : Union[str, Any] = 0 for hypo, reference in zip(UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : List[Any] = set(hypo.split('\t' )[:k] ) _UpperCamelCase : List[Any] = set(reference.split('\t' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _UpperCamelCase : Any = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): def strip_title(UpperCAmelCase_ ): if title.startswith('"' ): _UpperCamelCase : List[str] = title[1:] if title.endswith('"' ): _UpperCamelCase : Any = title[:-1] return title _UpperCamelCase : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( UpperCAmelCase_ , return_tensors='pt' , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , )['input_ids'].to(args.device ) _UpperCamelCase : List[str] = rag_model.rag.question_encoder(UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = question_enc_outputs[0] _UpperCamelCase : str = rag_model.retriever( UpperCAmelCase_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , ) _UpperCamelCase : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _UpperCamelCase : Optional[Any] = [] for docs in all_docs: _UpperCamelCase : Any = [strip_title(UpperCAmelCase_ ) for title in docs['title']] provenance_strings.append('\t'.join(UpperCAmelCase_ ) ) return provenance_strings def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): with torch.no_grad(): _UpperCamelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( UpperCAmelCase_ , return_tensors='pt' , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) _UpperCamelCase : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _UpperCamelCase : Optional[int] = inputs_dict.attention_mask.to(args.device ) _UpperCamelCase : str = rag_model.generate( # rag_model overwrites generate UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _UpperCamelCase : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) if args.print_predictions: for q, a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): logger.info('Q: {} - A: {}'.format(UpperCAmelCase_ , UpperCAmelCase_ ) ) return answers def A__ ( ): _UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument( '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=UpperCAmelCase_ , help=( 'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the' ' model_name_or_path' ) , ) parser.add_argument( '--index_name' , default=UpperCAmelCase_ , choices=['exact', 'compressed', 'legacy'] , type=UpperCAmelCase_ , help='RAG model retriever type' , ) parser.add_argument( '--index_path' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='Path to the retrieval index' , ) parser.add_argument('--n_docs' , default=5 , type=UpperCAmelCase_ , help='Number of retrieved docs' ) parser.add_argument( '--model_name_or_path' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , ) parser.add_argument( '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=UpperCAmelCase_ , help=( 'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates' ' precision@k.' ) , ) parser.add_argument('--k' , default=1 , type=UpperCAmelCase_ , help='k for the precision@k calculation' ) parser.add_argument( '--evaluation_set' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to a file containing evaluation samples' , ) parser.add_argument( '--gold_data_path' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to a tab-separated file with gold samples' , ) parser.add_argument( '--gold_data_mode' , default='qa' , type=UpperCAmelCase_ , choices=['qa', 'ans'] , help=( 'Format of the gold data file' 'qa - a single line in the following format: question [tab] answer_list' 'ans - a single line of the gold file contains the expected answer string' ) , ) parser.add_argument( '--predictions_path' , type=UpperCAmelCase_ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , ) parser.add_argument( '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , ) parser.add_argument( '--eval_batch_size' , default=8 , type=UpperCAmelCase_ , help='Batch size per GPU/CPU for evaluation.' , ) parser.add_argument( '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , ) parser.add_argument( '--num_beams' , default=4 , type=UpperCAmelCase_ , help='Number of beams to be used when generating answers' , ) parser.add_argument('--min_length' , default=1 , type=UpperCAmelCase_ , help='Min length of the generated answers' ) parser.add_argument('--max_length' , default=5_0 , type=UpperCAmelCase_ , help='Max length of the generated answers' ) parser.add_argument( '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , ) parser.add_argument( '--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , ) _UpperCamelCase : Optional[Any] = parser.parse_args() _UpperCamelCase : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) return args def A__ ( UpperCAmelCase_ ): _UpperCamelCase : Optional[int] = {} if args.model_type is None: _UpperCamelCase : Tuple = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('rag' ): _UpperCamelCase : str = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration _UpperCamelCase : Dict = args.n_docs if args.index_name is not None: _UpperCamelCase : int = args.index_name if args.index_path is not None: _UpperCamelCase : int = args.index_path else: _UpperCamelCase : Any = BartForConditionalGeneration _UpperCamelCase : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('Evaluate the following checkpoints: %s' , UpperCAmelCase_ ) _UpperCamelCase : Tuple = get_scores if args.eval_mode == 'e2e' else get_precision_at_k _UpperCamelCase : Dict = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) ) score_fn(UpperCAmelCase_ , args.predictions_path , args.gold_data_path ) continue logger.info('***** Running evaluation for {} *****'.format(UpperCAmelCase_ ) ) logger.info(' Batch size = %d' , args.eval_batch_size ) logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) ) if args.model_type.startswith('rag' ): _UpperCamelCase : Dict = RagRetriever.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = model_class.from_pretrained(UpperCAmelCase_ , retriever=UpperCAmelCase_ , **UpperCAmelCase_ ) model.retriever.init_retrieval() else: _UpperCamelCase : int = model_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) model.to(args.device ) with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file: _UpperCamelCase : str = [] for line in tqdm(UpperCAmelCase_ ): questions.append(line.strip() ) if len(UpperCAmelCase_ ) == args.eval_batch_size: _UpperCamelCase : List[str] = evaluate_batch_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) preds_file.write('\n'.join(UpperCAmelCase_ ) + '\n' ) preds_file.flush() _UpperCamelCase : Optional[Any] = [] if len(UpperCAmelCase_ ) > 0: _UpperCamelCase : Optional[Any] = evaluate_batch_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) preds_file.write('\n'.join(UpperCAmelCase_ ) ) preds_file.flush() score_fn(UpperCAmelCase_ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": snake_case_ : int = get_args() main(args)
83
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple =KandinskyVaaPriorPipeline __lowerCamelCase : Union[str, Any] =['prompt'] __lowerCamelCase : Any =['prompt', 'negative_prompt'] __lowerCamelCase : List[str] =[ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] __lowerCamelCase : List[Any] =False @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : Any ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return 100 @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__lowercase ) @property def UpperCamelCase_ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) __a = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } __a = PriorTransformer(**__lowercase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __a = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) __a = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __a = CLIPVisionModelWithProjection(__lowercase ) return model @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = CLIPImageProcessor( crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , ) return image_processor def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.dummy_prior __a = self.dummy_image_encoder __a = self.dummy_text_encoder __a = self.dummy_tokenizer __a = self.dummy_image_processor __a = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , ) __a = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ): '''simple docstring''' if str(__lowercase ).startswith("""mps""" ): __a = torch.manual_seed(__lowercase ) else: __a = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __a = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = """cpu""" __a = self.get_dummy_components() __a = self.pipeline_class(**__lowercase ) __a = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __a = pipe(**self.get_dummy_inputs(__lowercase ) ) __a = output.image_embeds __a = pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] __a = image[0, -10:] __a = image_from_tuple[0, -10:] assert image.shape == (1, 32) __a = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = torch_device == """cpu""" __a = True __a = False self._test_inference_batch_single_identical( test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , ) @skip_mps def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = torch_device == """cpu""" __a = False self._test_attention_slicing_forward_pass( test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
302
0
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class _SCREAMING_SNAKE_CASE : def __init__( self , __A=None , **__A ) -> Optional[int]: logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) lowerCAmelCase_ :Dict = model lowerCAmelCase_ :List[Any] = kwargs.get("""model_save_dir""" , __A ) lowerCAmelCase_ :Dict = kwargs.get("""latest_model_name""" , __A ) def __call__( self , **__A ) -> Any: lowerCAmelCase_ :List[Any] = {k: np.array(__A ) for k, v in kwargs.items()} return self.model.run(__A , __A ) @staticmethod def __lowerCAmelCase ( __A , __A=None , __A=None ) -> str: if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = """CPUExecutionProvider""" return ort.InferenceSession(__A , providers=[provider] , sess_options=__A ) def __lowerCAmelCase ( self , __A , __A = None , **__A ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME lowerCAmelCase_ :Any = self.model_save_dir.joinpath(self.latest_model_name ) lowerCAmelCase_ :str = Path(__A ).joinpath(__A ) try: shutil.copyfile(__A , __A ) except shutil.SameFileError: pass # copy external weights (for models >2GB) lowerCAmelCase_ :Union[str, Any] = self.model_save_dir.joinpath(__A ) if src_path.exists(): lowerCAmelCase_ :List[str] = Path(__A ).joinpath(__A ) try: shutil.copyfile(__A , __A ) except shutil.SameFileError: pass def __lowerCAmelCase ( self , __A , **__A , ) -> Union[str, Any]: if os.path.isfile(__A ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(__A , exist_ok=__A ) # saving model weights/files self._save_pretrained(__A , **__A ) @classmethod def __lowerCAmelCase ( cls , __A , __A = None , __A = None , __A = False , __A = None , __A = None , __A = None , __A = None , **__A , ) -> int: lowerCAmelCase_ :List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(__A ): lowerCAmelCase_ :Optional[int] = OnnxRuntimeModel.load_model( os.path.join(__A , __A ) , provider=__A , sess_options=__A ) lowerCAmelCase_ :int = Path(__A ) # load model from hub else: # download model lowerCAmelCase_ :Optional[int] = hf_hub_download( repo_id=__A , filename=__A , use_auth_token=__A , revision=__A , cache_dir=__A , force_download=__A , ) lowerCAmelCase_ :Optional[int] = Path(__A ).parent lowerCAmelCase_ :Optional[Any] = Path(__A ).name lowerCAmelCase_ :str = OnnxRuntimeModel.load_model(__A , provider=__A , sess_options=__A ) return cls(model=__A , **__A ) @classmethod def __lowerCAmelCase ( cls , __A , __A = True , __A = None , __A = None , **__A , ) -> Union[str, Any]: lowerCAmelCase_ :str = None if len(str(__A ).split("""@""" ) ) == 2: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = model_id.split("""@""" ) return cls._from_pretrained( model_id=__A , revision=__A , cache_dir=__A , force_download=__A , use_auth_token=__A , **__A , )
84
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = Dict[str, Any] lowerCamelCase__ = List[Prediction] @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ): '''simple docstring''' super().__init__(*__lowercase , **__lowercase ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ): '''simple docstring''' __a = {} if "threshold" in kwargs: __a = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ): '''simple docstring''' return super().__call__(*__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : Tuple ): '''simple docstring''' __a = load_image(__lowercase ) __a = torch.IntTensor([[image.height, image.width]] ) __a = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: __a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) __a = target_size return inputs def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ): '''simple docstring''' __a = model_inputs.pop("""target_size""" ) __a = self.model(**__lowercase ) __a = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: __a = model_inputs["""bbox"""] return model_outputs def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ): '''simple docstring''' __a = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. __a , __a = target_size[0].tolist() def unnormalize(__lowercase : Optional[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) __a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) __a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )] __a = ["""score""", """label""", """box"""] __a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase ) __a = raw_annotations[0] __a = raw_annotation["""scores"""] __a = raw_annotation["""labels"""] __a = raw_annotation["""boxes"""] __a = scores.tolist() __a = [self.model.config.idalabel[label.item()] for label in labels] __a = [self._get_bounding_box(__lowercase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __a = ["""score""", """label""", """box"""] __a = [ dict(zip(__lowercase , __lowercase ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) __a , __a , __a , __a = box.int().tolist() __a = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
302
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) snake_case_ = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case_ = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) snake_case_ = DDPMScheduler() snake_case_ = AudioDiffusionPipeline(vqvae=a__ , unet=self.dummy_unet , mel=a__ , scheduler=a__ ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) snake_case_ = torch.Generator(device=a__ ).manual_seed(42 ) snake_case_ = pipe(generator=a__ , steps=4 ) snake_case_ = output.audios[0] snake_case_ = output.images[0] snake_case_ = torch.Generator(device=a__ ).manual_seed(42 ) snake_case_ = pipe(generator=a__ , steps=4 , return_dict=a__ ) snake_case_ = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] snake_case_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] snake_case_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 snake_case_ = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) snake_case_ = DDIMScheduler() snake_case_ = self.dummy_vqvae_and_unet snake_case_ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a__ , scheduler=a__ ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) np.random.seed(0 ) snake_case_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) snake_case_ = torch.Generator(device=a__ ).manual_seed(42 ) snake_case_ = pipe(raw_audio=a__ , generator=a__ , start_step=5 , steps=10 ) snake_case_ = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] snake_case_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 snake_case_ = self.dummy_unet_condition snake_case_ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=a__ , mel=a__ , scheduler=a__ ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) np.random.seed(0 ) snake_case_ = torch.rand((1, 1, 10) ) snake_case_ = pipe(generator=a__ , encoding=a__ ) snake_case_ = output.images[0] snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] snake_case_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ = torch_device snake_case_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) snake_case_ = torch.Generator(device=a__ ).manual_seed(42 ) snake_case_ = pipe(generator=a__ ) snake_case_ = output.audios[0] snake_case_ = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] snake_case_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
85
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
302
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class A__ ( metaclass=_lowerCamelCase): A_ : Tuple = ['transformers', 'torch', 'note_seq'] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(self , ['transformers', 'torch', 'note_seq'] ) @classmethod def __lowerCamelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) @classmethod def __lowerCamelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
86
import random def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" __a , __a , __a = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None __a = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )] __a = 0 __a , __a , __a = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) __a = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
302
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class snake_case_ ( __A ): __A : Dict = "sew" def __init__( self : int , lowercase_ : int=32 , lowercase_ : Dict=7_68 , lowercase_ : List[Any]=12 , lowercase_ : Any=12 , lowercase_ : int=30_72 , lowercase_ : Any=2 , lowercase_ : Tuple="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : str=0.02 , lowercase_ : List[Any]=1E-5 , lowercase_ : Dict="group" , lowercase_ : Dict="gelu" , lowercase_ : Dict=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowercase_ : int=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase_ : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : int=True , lowercase_ : int=0.05 , lowercase_ : Tuple=10 , lowercase_ : str=2 , lowercase_ : Dict=0.0 , lowercase_ : List[Any]=10 , lowercase_ : List[str]=0 , lowercase_ : Any="mean" , lowercase_ : Optional[int]=False , lowercase_ : Dict=False , lowercase_ : List[Any]=2_56 , lowercase_ : List[Any]=0 , lowercase_ : List[Any]=1 , lowercase_ : Optional[Any]=2 , **lowercase_ : List[str] , ) -> List[str]: super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ ) lowercase__ : Dict = hidden_size lowercase__ : Union[str, Any] = feat_extract_norm lowercase__ : List[str] = feat_extract_activation lowercase__ : Optional[Any] = list(lowercase_ ) lowercase__ : Optional[Any] = list(lowercase_ ) lowercase__ : str = list(lowercase_ ) lowercase__ : Any = conv_bias lowercase__ : Optional[Any] = num_conv_pos_embeddings lowercase__ : str = num_conv_pos_embedding_groups lowercase__ : Tuple = len(self.conv_dim ) lowercase__ : Any = num_hidden_layers lowercase__ : Optional[int] = intermediate_size lowercase__ : List[Any] = squeeze_factor lowercase__ : Tuple = hidden_act lowercase__ : Optional[int] = num_attention_heads lowercase__ : List[Any] = hidden_dropout lowercase__ : List[Any] = attention_dropout lowercase__ : Optional[int] = activation_dropout lowercase__ : int = feat_proj_dropout lowercase__ : int = final_dropout lowercase__ : Optional[int] = layerdrop lowercase__ : List[Any] = layer_norm_eps lowercase__ : str = initializer_range lowercase__ : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ : int = apply_spec_augment lowercase__ : List[str] = mask_time_prob lowercase__ : str = mask_time_length lowercase__ : int = mask_time_min_masks lowercase__ : str = mask_feature_prob lowercase__ : Any = mask_feature_length lowercase__ : Optional[Any] = mask_feature_min_masks # ctc loss lowercase__ : int = ctc_loss_reduction lowercase__ : Union[str, Any] = ctc_zero_infinity # sequence classification lowercase__ : Any = use_weighted_layer_sum lowercase__ : Dict = classifier_proj_size @property def __UpperCamelCase ( self : Optional[int] ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
87
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def __init__( self : Optional[int] , **__lowercase : Dict ): '''simple docstring''' super().__init__(**__lowercase ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ): '''simple docstring''' return super().__call__(__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ): '''simple docstring''' __a = {} if "candidate_labels" in kwargs: __a = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __a = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ): '''simple docstring''' if isinstance(__lowercase , __lowercase ): if audio.startswith("""http://""" ) or audio.startswith("""https://""" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png __a = requests.get(__lowercase ).content else: with open(__lowercase , """rb""" ) as f: __a = f.read() if isinstance(__lowercase , __lowercase ): __a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate ) if not isinstance(__lowercase , np.ndarray ): raise ValueError("""We expect a numpy ndarray as input""" ) if len(audio.shape ) != 1: raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" ) __a = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" ) __a = candidate_labels __a = [hypothesis_template.format(__lowercase ) for x in candidate_labels] __a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase ) __a = [text_inputs] return inputs def UpperCamelCase_ ( self : Any , __lowercase : Any ): '''simple docstring''' __a = model_inputs.pop("""candidate_labels""" ) __a = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __lowercase ): __a = text_inputs[0] else: # Batching case. __a = text_inputs[0][0] __a = self.model(**__lowercase , **__lowercase ) __a = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_audio, } return model_outputs def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ): '''simple docstring''' __a = model_outputs.pop("""candidate_labels""" ) __a = model_outputs["""logits"""][0] if self.framework == "pt": __a = logits.softmax(dim=0 ) __a = probs.tolist() else: raise ValueError("""`tf` framework not supported.""" ) __a = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda __lowercase : -x[0] ) ] return result
302
0
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = embed_dim __magic_name__ = depths __magic_name__ = num_heads __magic_name__ = window_size __magic_name__ = mlp_ratio __magic_name__ = qkv_bias __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = drop_path_rate __magic_name__ = hidden_act __magic_name__ = use_absolute_embeddings __magic_name__ = patch_norm __magic_name__ = layer_norm_eps __magic_name__ = initializer_range __magic_name__ = is_training __magic_name__ = scope __magic_name__ = use_labels __magic_name__ = type_sequence_label_size __magic_name__ = encoder_stride __magic_name__ = out_features __magic_name__ = out_indices def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = self.get_config() return config, pixel_values, labels def _lowercase ( self : Tuple ) -> str: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]: """simple docstring""" __magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) __magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(UpperCamelCase__ ): __magic_name__ = ["""stem"""] __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ ) def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False def _lowercase ( self : Any ) -> List[str]: """simple docstring""" __magic_name__ = MaskFormerSwinModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def _lowercase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def _lowercase ( self : str ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self : Optional[int] ) -> List[str]: """simple docstring""" return def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) @unittest.skip("""Swin does not use inputs_embeds""" ) def _lowercase ( self : Any ) -> int: """simple docstring""" pass @unittest.skip("""Swin does not support feedforward chunking""" ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" pass def _lowercase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def _lowercase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def _lowercase ( self : List[str] ) -> Dict: """simple docstring""" pass def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any: """simple docstring""" __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) __magic_name__ = outputs.hidden_states __magic_name__ = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # Swin has a different seq_length __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowercase ( self : Dict ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _lowercase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _lowercase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass def _lowercase ( self : Dict ) -> Any: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ): __magic_name__ = 0 return t def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ): with torch.no_grad(): __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple() def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ): if isinstance(UpperCamelCase__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has''' F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.''' ) , ) recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _A ): '''simple docstring''' a__ = (MaskFormerSwinBackbone,) if is_torch_available() else () a__ = MaskFormerSwinConfig def _lowercase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __magic_name__ = MaskFormerSwinModelTester(self ) def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: __magic_name__ = backbone_class(UpperCamelCase__ ) backbone.to(UpperCamelCase__ ) backbone.eval() __magic_name__ = backbone(**UpperCamelCase__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ ) self.assertIsNotNone(outputs.attentions )
88
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Dict =['pixel_values'] def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ): '''simple docstring''' super().__init__(**__lowercase ) __a = size if size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase ) __a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" ) __a = do_resize __a = do_rescale __a = do_normalize __a = do_center_crop __a = crop_size __a = size __a = resample __a = rescale_factor __a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "shortest_edge" in size: __a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __a = (size["""height"""], size["""width"""]) else: raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" ) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ): '''simple docstring''' __a = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" ) return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ): '''simple docstring''' return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ): '''simple docstring''' return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ): '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = do_rescale if do_rescale is not None else self.do_rescale __a = do_normalize if do_normalize is not None else self.do_normalize __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase ) __a = resample if resample is not None else self.resample __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = size if size is not None else self.size __a = get_size_dict(__lowercase ) if not is_batched(__lowercase ): __a = [images] if not valid_images(__lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __a = [to_numpy_array(__lowercase ) for image in images] if do_resize: __a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] if do_center_crop: __a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images] if do_rescale: __a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: __a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] __a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __a = {"""pixel_values""": images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
302
0
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class __magic_name__ ( _UpperCamelCase ): def __init__( self : Union[str, Any] ): _a : Dict = [] def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Any ): self.events.append('on_init_end' ) def __lowercase ( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Tuple ,**_UpperCAmelCase : Dict ): self.events.append('on_train_begin' ) def __lowercase ( self : List[str] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : List[Any] ): self.events.append('on_train_end' ) def __lowercase ( self : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Any ,**_UpperCAmelCase : Optional[Any] ): self.events.append('on_epoch_begin' ) def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ): self.events.append('on_epoch_end' ) def __lowercase ( self : Dict ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,**_UpperCAmelCase : Optional[int] ): self.events.append('on_step_begin' ) def __lowercase ( self : str ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ,**_UpperCAmelCase : Dict ): self.events.append('on_step_end' ) def __lowercase ( self : Dict ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[Any] ,**_UpperCAmelCase : Any ): self.events.append('on_evaluate' ) def __lowercase ( self : int ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : int ): self.events.append('on_predict' ) def __lowercase ( self : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Tuple ): self.events.append('on_save' ) def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Union[str, Any] ): self.events.append('on_log' ) def __lowercase ( self : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,**_UpperCAmelCase : List[str] ): self.events.append('on_prediction_step' ) @require_torch class __magic_name__ ( unittest.TestCase ): def __lowercase ( self : int ): _a : Optional[int] = tempfile.mkdtemp() def __lowercase ( self : str ): shutil.rmtree(self.output_dir ) def __lowercase ( self : int ,_UpperCAmelCase : Dict=0 ,_UpperCAmelCase : str=0 ,_UpperCAmelCase : Dict=64 ,_UpperCAmelCase : List[str]=64 ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=False ,**_UpperCAmelCase : int ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. _a : int = RegressionDataset(length=_UpperCAmelCase ) _a : str = RegressionDataset(length=_UpperCAmelCase ) _a : List[Any] = RegressionModelConfig(a=_UpperCAmelCase ,b=_UpperCAmelCase ) _a : Optional[int] = RegressionPreTrainedModel(_UpperCAmelCase ) _a : Optional[int] = TrainingArguments(self.output_dir ,disable_tqdm=_UpperCAmelCase ,report_to=[] ,**_UpperCAmelCase ) return Trainer( _UpperCAmelCase ,_UpperCAmelCase ,train_dataset=_UpperCAmelCase ,eval_dataset=_UpperCAmelCase ,callbacks=_UpperCAmelCase ,) def __lowercase ( self : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any] ): self.assertEqual(len(_UpperCAmelCase ) ,len(_UpperCAmelCase ) ) # Order doesn't matter _a : List[Any] = sorted(_UpperCAmelCase ,key=lambda _UpperCAmelCase : cb.__name__ if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else cb.__class__.__name__ ) _a : List[str] = sorted(_UpperCAmelCase ,key=lambda _UpperCAmelCase : cb.__name__ if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else cb.__class__.__name__ ) for cba, cba in zip(_UpperCAmelCase ,_UpperCAmelCase ): if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) and isinstance(_UpperCAmelCase ,_UpperCAmelCase ): self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ) and not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): self.assertEqual(_UpperCAmelCase ,cba.__class__ ) elif not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) and isinstance(_UpperCAmelCase ,_UpperCAmelCase ): self.assertEqual(cba.__class__ ,_UpperCAmelCase ) else: self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : Any ,_UpperCAmelCase : Optional[Any] ): _a : str = ['on_init_end', 'on_train_begin'] _a : str = 0 _a : Optional[int] = len(trainer.get_eval_dataloader() ) _a : List[Any] = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(_UpperCAmelCase ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def __lowercase ( self : Optional[Any] ): _a : str = self.get_trainer() _a : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) # Callbacks passed at init are added to the default callbacks _a : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _a : Union[str, Any] = self.get_trainer(disable_tqdm=_UpperCAmelCase ) _a : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) def __lowercase ( self : Tuple ): _a : List[str] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _a : Optional[int] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(_UpperCAmelCase ) expected_callbacks.remove(_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) _a : Optional[Any] = self.get_trainer() _a : List[Any] = trainer.pop_callback(_UpperCAmelCase ) self.assertEqual(cb.__class__ ,_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) trainer.add_callback(_UpperCAmelCase ) expected_callbacks.insert(0 ,_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) # We can also add, pop, or remove by instance _a : str = self.get_trainer() _a : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(_UpperCAmelCase ) expected_callbacks.remove(_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) _a : Tuple = self.get_trainer() _a : Optional[Any] = trainer.callback_handler.callbacks[0] _a : Optional[Any] = trainer.pop_callback(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) trainer.add_callback(_UpperCAmelCase ) expected_callbacks.insert(0 ,_UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,_UpperCAmelCase ) def __lowercase ( self : List[Any] ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' ,category=_UpperCAmelCase ) _a : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _a : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(_UpperCAmelCase ,self.get_expected_events(_UpperCAmelCase ) ) # Independent log/save/eval _a : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() _a : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(_UpperCAmelCase ,self.get_expected_events(_UpperCAmelCase ) ) _a : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() _a : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(_UpperCAmelCase ,self.get_expected_events(_UpperCAmelCase ) ) _a : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='steps' ) trainer.train() _a : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(_UpperCAmelCase ,self.get_expected_events(_UpperCAmelCase ) ) _a : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='epoch' ) trainer.train() _a : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(_UpperCAmelCase ,self.get_expected_events(_UpperCAmelCase ) ) # A bit of everything _a : Optional[int] = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=10 ,eval_steps=5 ,evaluation_strategy='steps' ,) trainer.train() _a : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(_UpperCAmelCase ,self.get_expected_events(_UpperCAmelCase ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: _a : List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(_UpperCAmelCase ) in warn_mock.call_args[0][0]
89
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoTokenizer.from_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = tokenizer("""This is me""" , return_tensors="""pt""" ) __a = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __a = model.generate(**__lowercase ) __a = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __a = model_reloaded.generate(**__lowercase ) self.assertTrue(torch.allclose(__lowercase , __lowercase ) ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = """hf-internal-testing/tiny-random-t5""" __a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ) __a = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowercase ): model.save_pretrained(__lowercase ) __a = model.reverse_bettertransformer() model.save_pretrained(__lowercase )
302
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
90
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] ='albert' def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = embedding_size __a = hidden_size __a = num_hidden_layers __a = num_hidden_groups __a = num_attention_heads __a = inner_group_num __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout_prob __a = position_embedding_type class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
302
0
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") UpperCAmelCase_ : List[str] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") UpperCAmelCase_ : Optional[int] = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = CamembertTokenizer __UpperCamelCase = CamembertTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE_ : Optional[int] = CamembertTokenizer(lowercase_) tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = '''<pad>''' SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''<mask>''') self.assertEqual(len(lowercase_) , 1004) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1005) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = CamembertTokenizer(lowercase_) tokenizer.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Tuple = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = rust_tokenizer.encode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) self.assertListEqual(lowercase_ , lowercase_) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowercase_) SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.tokenize(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = rust_tokenizer.tokenize(lowercase_) self.assertListEqual(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) self.assertListEqual(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = rust_tokenizer.encode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. SCREAMING_SNAKE_CASE_ : Tuple = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowercase_ , )
91
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _a ( ): __lowerCAmelCase = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=SCREAMING_SNAKE_CASE_ , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=SCREAMING_SNAKE_CASE_ ) return parser.parse_args() def _a ( ): __lowerCAmelCase = parse_args() # Import training_script as a module. __lowerCAmelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __lowerCAmelCase = script_fpath.stem __lowerCAmelCase = importlib.import_module(SCREAMING_SNAKE_CASE_ ) # Patch sys.argv __lowerCAmelCase = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
92
class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = val __a = None __a = None def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any ): '''simple docstring''' if self.val: if val < self.val: if self.left is None: __a = Node(__lowercase ) else: self.left.insert(__lowercase ) elif val > self.val: if self.right is None: __a = Node(__lowercase ) else: self.right.insert(__lowercase ) else: __a = val def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if root: inorder(root.left , _SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , _SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return arr __a = Node(arr[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. __a = [] inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
302
0
'''simple docstring''' import requests from bsa import BeautifulSoup def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : dict ): """simple docstring""" lowercase_ : Union[str, Any] = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE , params=__SCREAMING_SNAKE_CASE ).content , '''html.parser''' ) lowercase_ : List[Any] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) lowercase_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": _lowercase : Optional[int] = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 3_0, "pages": "3979-3990", "year": 2_0_1_8, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
93
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): def UpperCamelCase_ ( self : str ): '''simple docstring''' __a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) ) class SCREAMING_SNAKE_CASE : def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = make_divisible(512 * width_multiplier , divisor=8 ) __a = hidden_act __a = conv_kernel_size __a = output_stride __a = classifier_dropout_prob __a = use_labels __a = is_training __a = num_labels __a = initializer_range __a = scope __a = width_multiplier __a = ffn_dropout __a = attn_dropout def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ): '''simple docstring''' __a = MobileViTVaModel(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ): '''simple docstring''' __a = self.num_labels __a = MobileViTVaForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[Any] =( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __lowerCamelCase : Any =( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __lowerCamelCase : Dict =False __lowerCamelCase : Optional[Any] =False __lowerCamelCase : int =False __lowerCamelCase : Any =False def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = MobileViTVaModelTester(self ) __a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def UpperCamelCase_ ( self : int ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__lowercase ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowercase ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def UpperCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ): __a = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __a = outputs.hidden_states __a = 5 self.assertEqual(len(__lowercase ) , __lowercase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __a = 2 for i in range(len(__lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = MobileViTVaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase__ ( ): """simple docstring""" __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( __lowercase ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowercase ) __a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits # verify the logits __a = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) __a = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = model.to(__lowercase ) __a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) __a = prepare_img() __a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # forward pass with torch.no_grad(): __a = model(**__lowercase ) __a = outputs.logits.detach().cpu() __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) __a = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) __a = image_processor.post_process_semantic_segmentation(outputs=__lowercase ) __a = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
302
0
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : List[Any] = logging.get_logger(__name__) # TODO Update this snake_case : Union[str, Any] = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'esm' def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1026 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ): super().__init__(pad_token_id=_lowerCamelCase , mask_token_id=_lowerCamelCase , **_lowerCamelCase ) a :Tuple = vocab_size a :List[str] = hidden_size a :int = num_hidden_layers a :int = num_attention_heads a :Union[str, Any] = intermediate_size a :Union[str, Any] = hidden_dropout_prob a :Any = attention_probs_dropout_prob a :List[Any] = max_position_embeddings a :str = initializer_range a :Tuple = layer_norm_eps a :Union[str, Any] = position_embedding_type a :List[str] = use_cache a :str = emb_layer_norm_before a :List[str] = token_dropout a :str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) a :Optional[Any] = EsmFoldConfig() elif isinstance(_lowerCamelCase , _lowerCamelCase ): a :Dict = EsmFoldConfig(**_lowerCamelCase ) a :Optional[Any] = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) a :str = get_default_vocab_list() else: a :Dict = vocab_list else: a :Tuple = None a :List[str] = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , _lowerCamelCase ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = super().to_dict() if isinstance(self.esmfold_config , _lowerCamelCase ): a :Optional[Any] = self.esmfold_config.to_dict() return output @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = None def SCREAMING_SNAKE_CASE__ ( self ): if self.trunk is None: a :List[str] = TrunkConfig() elif isinstance(self.trunk , _lowerCamelCase ): a :List[Any] = TrunkConfig(**self.trunk ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = asdict(self ) a :Any = self.trunk.to_dict() return output @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = 48 SCREAMING_SNAKE_CASE__ = 1024 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = None def SCREAMING_SNAKE_CASE__ ( self ): if self.structure_module is None: a :List[Any] = StructureModuleConfig() elif isinstance(self.structure_module , _lowerCamelCase ): a :Optional[int] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) a :Tuple = self.sequence_state_dim // self.sequence_head_width a :Union[str, Any] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = asdict(self ) a :Dict = self.structure_module.to_dict() return output @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = 384 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 16 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 12 SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = 8 SCREAMING_SNAKE_CASE__ = 0.1 SCREAMING_SNAKE_CASE__ = 8 SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 7 SCREAMING_SNAKE_CASE__ = 10 SCREAMING_SNAKE_CASE__ = 1e-8 SCREAMING_SNAKE_CASE__ = 1e5 def SCREAMING_SNAKE_CASE__ ( self ): return asdict(self ) def __lowerCamelCase ( ): """simple docstring""" return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
94
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
302
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : str = { """configuration_clipseg""": [ """CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPSegConfig""", """CLIPSegTextConfig""", """CLIPSegVisionConfig""", ], """processing_clipseg""": ["""CLIPSegProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = [ """CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPSegModel""", """CLIPSegPreTrainedModel""", """CLIPSegTextModel""", """CLIPSegVisionModel""", """CLIPSegForImageSegmentation""", ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
95
import string import numpy def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : __lowerCamelCase : List[str] =string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase : List[Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 ) __lowerCamelCase : Optional[Any] =numpy.vectorize(lowerCamelCase__ ) def __init__( self : Union[str, Any] , __lowercase : numpy.ndarray ): '''simple docstring''' __a = self.modulus(__lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __a = encrypt_key.shape[0] def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' return self.key_string.index(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : int ): '''simple docstring''' return self.key_string[round(__lowercase )] def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = len(self.key_string ) if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1: __a = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' __a = [char for char in text.upper() if char in self.key_string] __a = chars[-1] while len(__lowercase ) % self.break_key != 0: chars.append(__lowercase ) return "".join(__lowercase ) def UpperCamelCase_ ( self : List[str] , __lowercase : str ): '''simple docstring''' __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[ 0 ] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __a = i break __a = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowercase ) ) def UpperCamelCase_ ( self : Any , __lowercase : str ): '''simple docstring''' __a = self.make_decrypt_key() __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase__ ( ): """simple docstring""" __a = int(input("""Enter the order of the encryption key: """ ) ) __a = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(_SCREAMING_SNAKE_CASE ): __a = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) __a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) __a = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": __a = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": __a = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ ): _lowerCamelCase : str = str(lowercase__ ) return n == n[::-1] def _snake_case ( lowercase__ = 1000000 ): _lowerCamelCase : Optional[int] = 0 for i in range(1 , lowercase__ ): if is_palindrome(lowercase__ ) and is_palindrome(bin(lowercase__ ).split('b' )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
96
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : List[Any] ='autoformer' __lowerCamelCase : str ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ): '''simple docstring''' # time series specific configuration __a = prediction_length __a = context_length if context_length is not None else prediction_length __a = distribution_output __a = loss __a = input_size __a = num_time_features __a = lags_sequence __a = scaling __a = num_dynamic_real_features __a = num_static_real_features __a = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) __a = cardinality else: __a = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(__lowercase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) __a = embedding_dimension else: __a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __a = num_parallel_samples # Transformer architecture configuration __a = input_size * len(self.lags_sequence ) + self._number_of_features __a = d_model __a = encoder_attention_heads __a = decoder_attention_heads __a = encoder_ffn_dim __a = decoder_ffn_dim __a = encoder_layers __a = decoder_layers __a = dropout __a = attention_dropout __a = activation_dropout __a = encoder_layerdrop __a = decoder_layerdrop __a = activation_function __a = init_std __a = use_cache # Autoformer __a = label_length __a = moving_average __a = autocorrelation_factor super().__init__(is_encoder_decoder=__lowercase , **__lowercase ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
302
0
'''simple docstring''' import pytest __snake_case = '''__dummy_dataset1__''' __snake_case = ''' import json import os import datasets REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", ] ) ), "langs": datasets.Sequence(datasets.Value("string")), "spans": datasets.Sequence(datasets.Value("string")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), ] def _generate_examples(self, filepath): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, json.loads(line) ''' @pytest.fixture def a ( ) -> List[str]: '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def a ( ) -> List[Any]: '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def a ( __a , __a , __a ) -> int: '''simple docstring''' UpperCamelCase__ :Any = dataset_loading_script_name UpperCamelCase__ :Optional[int] = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=__a ) UpperCamelCase__ :List[str] = script_dir / f'''{script_name}.py''' with open(__a , '''w''' ) as f: f.write(__a ) return str(__a )
97
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
0
"""simple docstring""" from __future__ import annotations import queue class snake_case : """simple docstring""" def __init__( self : int ,lowerCamelCase__ : List[str] ): UpperCAmelCase__ = data UpperCAmelCase__ = None UpperCAmelCase__ = None def a_ ( ): print('\n********Press N to stop entering at any point of time********\n' ) UpperCAmelCase__ = input('Enter the value of the root node: ' ).strip().lower() UpperCAmelCase__ = queue.Queue() UpperCAmelCase__ = TreeNode(int(lowerCamelCase ) ) q.put(lowerCamelCase ) while not q.empty(): UpperCAmelCase__ = q.get() UpperCAmelCase__ = f'''Enter the left node of {node_found.data}: ''' UpperCAmelCase__ = input(lowerCamelCase ).strip().lower() or 'n' if check == "n": return tree_node UpperCAmelCase__ = TreeNode(int(lowerCamelCase ) ) UpperCAmelCase__ = left_node q.put(lowerCamelCase ) UpperCAmelCase__ = f'''Enter the right node of {node_found.data}: ''' UpperCAmelCase__ = input(lowerCamelCase ).strip().lower() or 'n' if check == "n": return tree_node UpperCAmelCase__ = TreeNode(int(lowerCamelCase ) ) UpperCAmelCase__ = right_node q.put(lowerCamelCase ) raise def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return print(node.data , end=',' ) pre_order(node.left ) pre_order(node.right ) def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return in_order(node.left ) print(node.data , end=',' ) in_order(node.right ) def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=',' ) def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return UpperCAmelCase__ = queue.Queue() q.put(lowerCamelCase ) while not q.empty(): UpperCAmelCase__ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return UpperCAmelCase__ = queue.Queue() q.put(lowerCamelCase ) while not q.empty(): UpperCAmelCase__ = [] while not q.empty(): UpperCAmelCase__ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(lowerCamelCase ) def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return UpperCAmelCase__ = [] UpperCAmelCase__ = node while n or stack: while n: # start from root node, find its left child print(n.data , end=',' ) stack.append(lowerCamelCase ) UpperCAmelCase__ = n.left # end of while means current node doesn't have left child UpperCAmelCase__ = stack.pop() # start to traverse its right child UpperCAmelCase__ = n.right def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return UpperCAmelCase__ = [] UpperCAmelCase__ = node while n or stack: while n: stack.append(lowerCamelCase ) UpperCAmelCase__ = n.left UpperCAmelCase__ = stack.pop() print(n.data , end=',' ) UpperCAmelCase__ = n.right def a_ ( lowerCamelCase ): if not isinstance(lowerCamelCase , lowerCamelCase ) or not node: return UpperCAmelCase__ , UpperCAmelCase__ = [], [] UpperCAmelCase__ = node stacka.append(lowerCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase__ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(lowerCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=',' ) def a_ ( lowerCamelCase = "" , lowerCamelCase=5_0 , lowerCamelCase="*" ): if not s: return "\n" + width * char UpperCAmelCase__ , UpperCAmelCase__ = divmod(width - len(lowerCamelCase ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) lowerCAmelCase__ : TreeNode = build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
98
from __future__ import annotations lowerCamelCase__ = """#""" class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] ): '''simple docstring''' __a = {} def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ): '''simple docstring''' __a = self._trie for char in text: if char not in trie: __a = {} __a = trie[char] __a = True def UpperCamelCase_ ( self : Tuple , __lowercase : str ): '''simple docstring''' __a = self._trie for char in prefix: if char in trie: __a = trie[char] else: return [] return self._elements(__lowercase ) def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ): '''simple docstring''' __a = [] for c, v in d.items(): __a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )] result.extend(__lowercase ) return tuple(__lowercase ) lowerCamelCase__ = Trie() lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" __a = trie.find_word(_SCREAMING_SNAKE_CASE ) return tuple(string + word for word in suffixes ) def lowerCAmelCase__ ( ): """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
def A_ ( A__ , A__ ) -> int: return int((input_a, input_a).count(0 ) != 0 ) def A_ ( ) -> None: assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
99
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = num_attention_heads __a = attention_head_dim __a = num_attention_heads * attention_head_dim __a = additional_embeddings __a = time_embed_dim or inner_dim __a = embedding_proj_dim or embedding_dim __a = clip_embed_dim or embedding_dim __a = Timesteps(__lowercase , __lowercase , 0 ) __a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) if embedding_proj_norm_type is None: __a = None elif embedding_proj_norm_type == "layer": __a = nn.LayerNorm(__lowercase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) __a = nn.Linear(__lowercase , __lowercase ) if encoder_hid_proj_type is None: __a = None elif encoder_hid_proj_type == "linear": __a = nn.Linear(__lowercase , __lowercase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) __a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) ) if added_emb_type == "prd": __a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) ) elif added_emb_type is None: __a = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) __a = nn.ModuleList( [ BasicTransformerBlock( __lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , ) for d in range(__lowercase ) ] ) if norm_in_type == "layer": __a = nn.LayerNorm(__lowercase ) elif norm_in_type is None: __a = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) __a = nn.LayerNorm(__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) __a = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) __a = causal_attention_mask[None, ...] self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = {} def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ): if hasattr(__lowercase , """set_processor""" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowercase , __lowercase , __lowercase ) return processors def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __a = len(self.attn_processors.keys() ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ): if hasattr(__lowercase , """set_processor""" ): if not isinstance(__lowercase , __lowercase ): module.set_processor(__lowercase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ): '''simple docstring''' __a = hidden_states.shape[0] __a = timestep if not torch.is_tensor(__lowercase ): __a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0: __a = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device ) __a = self.time_proj(__lowercase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. __a = timesteps_projected.to(dtype=self.dtype ) __a = self.time_embedding(__lowercase ) if self.embedding_proj_norm is not None: __a = self.embedding_proj_norm(__lowercase ) __a = self.embedding_proj(__lowercase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __a = self.encoder_hidden_states_proj(__lowercase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) __a = self.proj_in(__lowercase ) __a = self.positional_embedding.to(hidden_states.dtype ) __a = [] __a = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowercase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __a = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __a = hidden_states[:, None, :] __a = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 ) additional_embeds.append(__lowercase ) __a = torch.cat( __lowercase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __a = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __a = F.pad( __lowercase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __a = hidden_states + positional_embeddings if attention_mask is not None: __a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 __a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 ) __a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __a = self.norm_in(__lowercase ) for block in self.transformer_blocks: __a = block(__lowercase , attention_mask=__lowercase ) __a = self.norm_out(__lowercase ) if self.prd_embedding is not None: __a = hidden_states[:, -1] else: __a = hidden_states[:, additional_embeddings_len:] __a = self.proj_to_clip_embeddings(__lowercase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple ): '''simple docstring''' __a = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
302
0
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) set_seed(770) __magic_name__ = { "c_attn": "att_proj", "c_proj": "out_proj", "c_fc": "in_proj", "transformer.": "", "h.": "layers.", "ln_1": "layernorm_1", "ln_2": "layernorm_2", "ln_f": "layernorm_final", "wpe": "position_embeds_layer", "wte": "input_embeds_layer", } __magic_name__ = { "text_small": { "repo_id": "suno/bark", "file_name": "text.pt", }, "coarse_small": { "repo_id": "suno/bark", "file_name": "coarse.pt", }, "fine_small": { "repo_id": "suno/bark", "file_name": "fine.pt", }, "text": { "repo_id": "suno/bark", "file_name": "text_2.pt", }, "coarse": { "repo_id": "suno/bark", "file_name": "coarse_2.pt", }, "fine": { "repo_id": "suno/bark", "file_name": "fine_2.pt", }, } __magic_name__ = os.path.dirname(os.path.abspath(__file__)) __magic_name__ = os.path.join(os.path.expanduser("~"), ".cache") __magic_name__ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0") def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=False ): __SCREAMING_SNAKE_CASE = model_type if use_small: key += "_small" return os.path.join(UpperCamelCase_ , REMOTE_MODEL_PATHS[key]["""file_name"""] ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) hf_hub_download(repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , local_dir=UpperCamelCase_ ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_="text" ): if model_type == "text": __SCREAMING_SNAKE_CASE = BarkSemanticModel __SCREAMING_SNAKE_CASE = BarkSemanticConfig __SCREAMING_SNAKE_CASE = BarkSemanticGenerationConfig elif model_type == "coarse": __SCREAMING_SNAKE_CASE = BarkCoarseModel __SCREAMING_SNAKE_CASE = BarkCoarseConfig __SCREAMING_SNAKE_CASE = BarkCoarseGenerationConfig elif model_type == "fine": __SCREAMING_SNAKE_CASE = BarkFineModel __SCREAMING_SNAKE_CASE = BarkFineConfig __SCREAMING_SNAKE_CASE = BarkFineGenerationConfig else: raise NotImplementedError() __SCREAMING_SNAKE_CASE = f"{model_type}_small" if use_small else model_type __SCREAMING_SNAKE_CASE = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(UpperCamelCase_ ): logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." ) _download(model_info["""repo_id"""] , model_info["""file_name"""] ) __SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase_ , map_location=UpperCamelCase_ ) # this is a hack __SCREAMING_SNAKE_CASE = checkpoint["""model_args"""] if "input_vocab_size" not in model_args: __SCREAMING_SNAKE_CASE = model_args["""vocab_size"""] __SCREAMING_SNAKE_CASE = model_args["""vocab_size"""] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments __SCREAMING_SNAKE_CASE = model_args.pop("""n_head""" ) __SCREAMING_SNAKE_CASE = model_args.pop("""n_embd""" ) __SCREAMING_SNAKE_CASE = model_args.pop("""n_layer""" ) __SCREAMING_SNAKE_CASE = ConfigClass(**checkpoint["""model_args"""] ) __SCREAMING_SNAKE_CASE = ModelClass(config=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = GenerationConfigClass() __SCREAMING_SNAKE_CASE = model_generation_config __SCREAMING_SNAKE_CASE = checkpoint["""model"""] # fixup checkpoint __SCREAMING_SNAKE_CASE = """_orig_mod.""" for k, v in list(state_dict.items() ): if k.startswith(UpperCamelCase_ ): # replace part of the key with corresponding layer name in HF implementation __SCREAMING_SNAKE_CASE = k[len(UpperCamelCase_ ) :] for old_layer_name in new_layer_name_dict: __SCREAMING_SNAKE_CASE = new_k.replace(UpperCamelCase_ , new_layer_name_dict[old_layer_name] ) __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = set(state_dict.keys() ) - set(model.state_dict().keys() ) __SCREAMING_SNAKE_CASE = {k for k in extra_keys if not k.endswith(""".attn.bias""" )} __SCREAMING_SNAKE_CASE = set(model.state_dict().keys() ) - set(state_dict.keys() ) __SCREAMING_SNAKE_CASE = {k for k in missing_keys if not k.endswith(""".attn.bias""" )} if len(UpperCamelCase_ ) != 0: raise ValueError(f"extra keys found: {extra_keys}" ) if len(UpperCamelCase_ ) != 0: raise ValueError(f"missing keys: {missing_keys}" ) model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model.num_parameters(exclude_embeddings=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = checkpoint["""best_val_loss"""].item() logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCamelCase_ , 3 )} loss" ) model.eval() model.to(UpperCamelCase_ ) del checkpoint, state_dict return model def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_="text" ): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() __SCREAMING_SNAKE_CASE = """cpu""" # do conversion on cpu __SCREAMING_SNAKE_CASE = _get_ckpt_path(UpperCamelCase_ , use_small=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = _load_model(UpperCamelCase_ , UpperCamelCase_ , model_type=UpperCamelCase_ , use_small=UpperCamelCase_ ) # load bark initial model __SCREAMING_SNAKE_CASE = _bark_load_model(UpperCamelCase_ , """cpu""" , model_type=UpperCamelCase_ , use_small=UpperCamelCase_ ) if model_type == "text": __SCREAMING_SNAKE_CASE = bark_model["""model"""] if model.num_parameters(exclude_embeddings=UpperCamelCase_ ) != bark_model.get_num_params(): raise ValueError("""initial and new models don't have the same number of parameters""" ) # check if same output as the bark model __SCREAMING_SNAKE_CASE = 5 __SCREAMING_SNAKE_CASE = 10 if model_type in ["text", "coarse"]: __SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) __SCREAMING_SNAKE_CASE = bark_model(UpperCamelCase_ )[0] __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ) # take last logits __SCREAMING_SNAKE_CASE = output_new_model_total.logits[:, [-1], :] else: __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = bark_model(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("""initial and new outputs don't have the same shape""" ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError("""initial and new outputs are not equal""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __SCREAMING_SNAKE_CASE = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = BarkSemanticConfig.from_pretrained(os.path.join(UpperCamelCase_ , """config.json""" ) ) __SCREAMING_SNAKE_CASE = BarkCoarseConfig.from_pretrained(os.path.join(UpperCamelCase_ , """config.json""" ) ) __SCREAMING_SNAKE_CASE = BarkFineConfig.from_pretrained(os.path.join(UpperCamelCase_ , """config.json""" ) ) __SCREAMING_SNAKE_CASE = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" ) __SCREAMING_SNAKE_CASE = BarkSemanticModel.from_pretrained(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = BarkCoarseModel.from_pretrained(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = BarkFineModel.from_pretrained(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = EncodecModel.from_pretrained("""facebook/encodec_24khz""" ) __SCREAMING_SNAKE_CASE = BarkConfig.from_sub_model_configs( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) __SCREAMING_SNAKE_CASE = BarkModel(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = semantic __SCREAMING_SNAKE_CASE = coarseAcoustic __SCREAMING_SNAKE_CASE = fineAcoustic __SCREAMING_SNAKE_CASE = codec __SCREAMING_SNAKE_CASE = bark_generation_config Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) bark.save_pretrained(UpperCamelCase_ , repo_id=UpperCamelCase_ , push_to_hub=UpperCamelCase_ ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument("model_type", type=str, help="text, coarse or fine.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.") __magic_name__ = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
100
from functools import lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 __a = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_SCREAMING_SNAKE_CASE ) if n > 1: factors.add(_SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ): """simple docstring""" return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 while True: # Increment each value of a generated range __a = [base + i for i in range(_SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. __a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group] checker.append(_SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(_SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ): """simple docstring""" __a = run(_SCREAMING_SNAKE_CASE ) return results[0] if len(_SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
302
0