Dataset columns:
    code: string (length 87 to 55.2k)
    code_codestyle: int64 (0 to 349)
    style_context: string (length 135 to 49.1k)
    style_context_codestyle: int64 (0 to 349)
    label: int64 (0 or 1)
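A minimal sketch of how rows with this schema could be read using the Hugging Face datasets library; the repository id below is a hypothetical placeholder, not the actual dataset name, and the meaning of the label column is not stated in this dump.

# Sketch only: "user/code-style-dataset" is a placeholder repository id (assumption).
from datasets import load_dataset

dataset = load_dataset("user/code-style-dataset", split="train")  # hypothetical repo id

example = dataset[0]
print(len(example["code"]), example["code_codestyle"])                     # source string and its style id (0-349)
print(len(example["style_context"]), example["style_context_codestyle"])  # context string and its style id
print(example["label"])                                                    # binary label (0 or 1)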
"""simple docstring""" from manim import * class __snake_case ( _lowercase): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = Rectangle(height=0.5 , width=0.5 ) _lowerCamelCase : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _lowerCamelCase : List[Any] = Rectangle(height=0.25 , width=0.25 ) _lowerCamelCase : int = [mem.copy() for i in range(6 )] _lowerCamelCase : List[str] = [mem.copy() for i in range(6 )] _lowerCamelCase : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : Dict = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : Any = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : List[str] = Text('''CPU''' , font_size=2_4 ) _lowerCamelCase : Any = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = [mem.copy() for i in range(4 )] _lowerCamelCase : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : List[str] = Text('''GPU''' , font_size=2_4 ) _lowerCamelCase : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCAmelCase ) _lowerCamelCase : List[str] = [mem.copy() for i in range(6 )] _lowerCamelCase : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : Optional[Any] = Text('''Model''' , font_size=2_4 ) _lowerCamelCase : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCAmelCase ) _lowerCamelCase : List[str] = [] _lowerCamelCase : List[str] = [] for i, rect in enumerate(__lowerCAmelCase ): _lowerCamelCase : List[str] = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 ) target.move_to(__lowerCAmelCase ) model_arr.append(__lowerCAmelCase ) _lowerCamelCase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(__lowerCAmelCase ) self.add(*__lowerCAmelCase , *__lowerCAmelCase ) _lowerCamelCase : Dict = [meta_mem.copy() for i in range(6 )] _lowerCamelCase : Dict = [meta_mem.copy() for i in range(6 )] _lowerCamelCase : Optional[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : Optional[Any] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) _lowerCamelCase : str = Text('''Disk''' , font_size=2_4 ) _lowerCamelCase : List[Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _lowerCamelCase : List[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[Any] = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , ) 
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowerCAmelCase ) _lowerCamelCase : Tuple = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = Square(0.3 ) input.set_fill(__lowerCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 ) self.play(Write(__lowerCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.02 ) self.play(MoveToTarget(__lowerCAmelCase ) ) self.play(FadeOut(__lowerCAmelCase ) ) _lowerCamelCase : List[Any] = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _lowerCamelCase : Any = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) ) _lowerCamelCase : List[str] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02} self.play( Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _lowerCamelCase : Optional[int] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , __lowerCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _lowerCamelCase : int = AnimationGroup( FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(__lowerCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _lowerCamelCase : Dict = 0.7 self.play( Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _lowerCamelCase : Dict = a_c _lowerCamelCase : List[str] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , 
RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , ) _lowerCamelCase : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) ) self.wait()
72
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
1
"""simple docstring""" from PIL import Image def snake_case_ ( A_ : Image, A_ : float ): '''simple docstring''' def brightness(A_ : int ) -> float: return 1_28 + level + (c - 1_28) if not -255.0 <= level <= 255.0: raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' ) return img.point(A_ ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change brightness to 100 lowerCAmelCase__ = change_brightness(img, 100) brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
72
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
1
"""simple docstring""" # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''T''') class __snake_case ( Generic[T]): def __init__( self : Tuple , __lowerCAmelCase : bool = True ): """simple docstring""" _lowerCamelCase : dict[T, list[T]] = {} # dictionary of lists _lowerCamelCase : Any = directed def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : T ): """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) self.adj_list[destination_vertex].append(__lowerCAmelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) _lowerCamelCase : List[Any] = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _lowerCamelCase : Any = [destination_vertex] _lowerCamelCase : Optional[int] = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _lowerCamelCase : List[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _lowerCamelCase : Any = [destination_vertex] _lowerCamelCase : List[str] = [] return self def __repr__( self : Dict ): """simple docstring""" return pformat(self.adj_list )
72
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , 
len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : 
Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : 
List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) _lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] 
, __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, 
-4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
1
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger lowerCAmelCase__ = get_logger(__name__) lowerCAmelCase__ = Path(__file__).parent / '''model_card_template.md''' lowerCAmelCase__ = uuida().hex lowerCAmelCase__ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES lowerCAmelCase__ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES lowerCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def snake_case_ ( A_ : Union[Dict, str, None] = None ): '''simple docstring''' _lowerCamelCase : List[Any] = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F'''; torch/{_torch_version}''' if is_flax_available(): ua += F'''; jax/{_jax_version}''' ua += F'''; flax/{_flax_version}''' if is_onnx_available(): ua += F'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(A_, A_ ): ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(A_, A_ ): ua += "; " + user_agent return ua def snake_case_ ( A_ : str, A_ : Optional[str] = None, A_ : Optional[str] = None ): '''simple docstring''' if token is None: _lowerCamelCase : str = HfFolder.get_token() if organization is None: _lowerCamelCase : Optional[Any] = whoami(A_ )['''name'''] return F'''{username}/{model_id}''' else: return F'''{organization}/{model_id}''' def snake_case_ ( A_ : Optional[Any], A_ : Any ): '''simple docstring''' if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(A_, '''local_rank''' ) and args.local_rank not in [-1, 0]: return _lowerCamelCase : Dict = args.hub_token if hasattr(A_, '''hub_token''' ) else None _lowerCamelCase : str = get_full_repo_name(A_, token=A_ ) _lowerCamelCase : List[str] = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=A_, model_name=A_, repo_name=A_, dataset_name=args.dataset_name if hasattr(A_, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(A_, 
'''gradient_accumulation_steps''' ) else None ), adam_betaa=args.adam_betaa if hasattr(A_, '''adam_beta1''' ) else None, adam_betaa=args.adam_betaa if hasattr(A_, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(A_, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(A_, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(A_, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(A_, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(A_, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(A_, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(A_, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, ) _lowerCamelCase : Optional[int] = os.path.join(args.output_dir, '''README.md''' ) model_card.save(A_ ) def snake_case_ ( A_ : Optional[str], A_ : Optional[str] = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash _lowerCamelCase : Union[str, Any] = str(Path(A_ ).as_posix() ) _lowerCamelCase : int = re.search(R'''snapshots/([^/]+)/''', A_ ) if search is None: return None _lowerCamelCase : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(A_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. lowerCAmelCase__ = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) lowerCAmelCase__ = os.path.join(hf_cache_home, '''diffusers''') def snake_case_ ( A_ : Optional[str] = None, A_ : Optional[str] = None ): '''simple docstring''' if new_cache_dir is None: _lowerCamelCase : List[str] = DIFFUSERS_CACHE if old_cache_dir is None: _lowerCamelCase : Union[str, Any] = old_diffusers_cache _lowerCamelCase : Tuple = Path(A_ ).expanduser() _lowerCamelCase : Dict = Path(A_ ).expanduser() for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): _lowerCamelCase : str = new_cache_dir / old_blob_path.relative_to(A_ ) new_blob_path.parent.mkdir(parents=A_, exist_ok=A_ ) os.replace(A_, A_ ) try: os.symlink(A_, A_ ) except OSError: logger.warning( '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). lowerCAmelCase__ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): lowerCAmelCase__ = 0 else: with open(cache_version_file) as f: try: lowerCAmelCase__ = int(f.read()) except ValueError: lowerCAmelCase__ = 0 if cache_version < 1: lowerCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. 
This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: lowerCAmelCase__ = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ '''the directory exists and can be written to.''' ) def snake_case_ ( A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if variant is not None: _lowerCamelCase : List[Any] = weights_name.split('''.''' ) _lowerCamelCase : Union[str, Any] = splits[:-1] + [variant] + splits[-1:] _lowerCamelCase : str = '''.'''.join(A_ ) return weights_name def snake_case_ ( A_ : str, *, A_ : Optional[Any], A_ : Optional[Any], A_ : Optional[int], A_ : Any, A_ : str, A_ : Optional[int], A_ : Tuple, A_ : List[Any], A_ : Optional[int], A_ : List[str], A_ : List[str]=None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = str(A_ ) if os.path.isfile(A_ ): return pretrained_model_name_or_path elif os.path.isdir(A_ ): if os.path.isfile(os.path.join(A_, A_ ) ): # Load from a PyTorch checkpoint _lowerCamelCase : Optional[Any] = os.path.join(A_, A_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(A_, A_, A_ ) ): _lowerCamelCase : List[str] = os.path.join(A_, A_, A_ ) return model_file else: raise EnvironmentError( F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(A_ ).base_version ) >= version.parse('''0.20.0''' ) ): try: _lowerCamelCase : str = hf_hub_download( A_, filename=_add_variant(A_, A_ ), cache_dir=A_, force_download=A_, proxies=A_, resume_download=A_, local_files_only=A_, use_auth_token=A_, user_agent=A_, subfolder=A_, revision=revision or commit_hash, ) warnings.warn( F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''', A_, ) return model_file except: # noqa: E722 warnings.warn( F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(A_, A_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(A_, A_ )}\' so that the correct variant file can be added.''', A_, ) try: # 2. Load model file as usual _lowerCamelCase : int = hf_hub_download( A_, filename=A_, cache_dir=A_, force_download=A_, proxies=A_, resume_download=A_, local_files_only=A_, use_auth_token=A_, user_agent=A_, subfolder=A_, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ''' '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ''' '''login`.''' ) except RevisionNotFoundError: raise EnvironmentError( F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' '''this model name. Check the model page at ''' F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' F''' directory containing a file named {weights_name} or''' ''' \nCheckout your internet connection or see how to run the library in''' ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' ) except EnvironmentError: raise EnvironmentError( F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ''' F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' F'''containing a file named {weights_name}''' )
72
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
1
"""simple docstring""" from __future__ import annotations import math def snake_case_ ( A_ : float, A_ : int ): '''simple docstring''' _lowerCamelCase : Tuple = u for i in range(1, A_ ): _lowerCamelCase : int = temp * (u - i) return temp def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = int(input('''enter the numbers of values: ''' ) ) _lowerCamelCase : list[list[float]] = [] for _ in range(A_ ): y.append([] ) for i in range(A_ ): for j in range(A_ ): y[i].append(A_ ) _lowerCamelCase : Optional[int] = 0 print('''enter the values of parameters in a list: ''' ) _lowerCamelCase : Optional[int] = list(map(A_, input().split() ) ) print('''enter the values of corresponding parameters: ''' ) for i in range(A_ ): _lowerCamelCase : Dict = float(input() ) _lowerCamelCase : Tuple = int(input('''enter the value to interpolate: ''' ) ) _lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1, A_ ): for j in range(n - i ): _lowerCamelCase : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1] _lowerCamelCase : Any = y[0][0] for i in range(1, A_ ): summ += (ucal(A_, A_ ) * y[0][i]) / math.factorial(A_ ) print(F'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ = { '''squeezebert/squeezebert-uncased''': 512, '''squeezebert/squeezebert-mnli''': 512, '''squeezebert/squeezebert-mnli-headless''': 512, } lowerCAmelCase__ = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class __snake_case ( _lowercase): snake_case__ : Optional[Any] = VOCAB_FILES_NAMES snake_case__ : List[str] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Tuple = PRETRAINED_INIT_CONFIGURATION snake_case__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : List[str] = SqueezeBertTokenizer def __init__( self : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]="[UNK]" , __lowerCAmelCase : List[str]="[SEP]" , __lowerCAmelCase : str="[PAD]" , __lowerCAmelCase : Union[str, Any]="[CLS]" , __lowerCAmelCase : Dict="[MASK]" , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : List[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , tokenize_chinese_chars=__lowerCAmelCase , strip_accents=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __lowerCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __lowerCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __lowerCAmelCase ) != tokenize_chinese_chars ): _lowerCamelCase : Dict = getattr(__lowerCAmelCase , normalizer_state.pop('''type''' ) ) _lowerCamelCase : Tuple = do_lower_case _lowerCamelCase : Any = strip_accents _lowerCamelCase : List[Any] = tokenize_chinese_chars _lowerCamelCase : Union[str, Any] = normalizer_class(**__lowerCAmelCase ) _lowerCamelCase : Dict = do_lower_case def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : List[Any] , 
__lowerCAmelCase : Dict=None ): """simple docstring""" _lowerCamelCase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : Dict = [self.sep_token_id] _lowerCamelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : List[Any] = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase )
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase 
: Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return 
super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = 
do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ = logging.get_logger(__name__) def snake_case_ ( A_ : Dict ): '''simple docstring''' if isinstance(A_, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(A_, (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(A_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __snake_case ( _lowercase): snake_case__ : Dict = ["pixel_values"] def __init__( self : str , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 2_5_5 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , **__lowerCAmelCase : Tuple , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = size if size is not None else {'''shortest_edge''': 2_2_4} _lowerCamelCase : List[Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _lowerCamelCase : str = get_size_dict(__lowerCAmelCase , param_name='''crop_size''' ) _lowerCamelCase : int = do_resize _lowerCamelCase : Optional[Any] = size _lowerCamelCase : Optional[int] = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : str = resample _lowerCamelCase : Any = do_rescale _lowerCamelCase : List[Any] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : List[Any] , ): """simple docstring""" _lowerCamelCase : Optional[Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) if "shortest_edge" in size: _lowerCamelCase : Tuple = get_resize_output_image_size(__lowerCAmelCase , size['''shortest_edge'''] , default_to_square=__lowerCAmelCase ) elif "height" in size and "width" in size: _lowerCamelCase : str = (size['''height'''], size['''width''']) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict , ): """simple docstring""" _lowerCamelCase : int = get_size_dict(__lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[int, float] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict , ): """simple docstring""" return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Tuple , ): """simple docstring""" return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_lowerCamelCase : Any = to_numpy_array(__lowerCAmelCase ) if do_resize: _lowerCamelCase : Tuple = self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) if do_center_crop: _lowerCamelCase : List[str] = self.center_crop(__lowerCAmelCase , size=__lowerCAmelCase ) if do_rescale: _lowerCamelCase : Optional[Any] = self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) if do_normalize: _lowerCamelCase : int = self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) _lowerCamelCase : str = to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) return image def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Any , ): """simple docstring""" _lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : str = resample if resample is not None else self.resample _lowerCamelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Any = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : int = image_std if image_std is not None else self.image_std _lowerCamelCase : List[str] = size if size is not None else self.size _lowerCamelCase : str = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : str = get_size_dict(__lowerCAmelCase , param_name='''crop_size''' ) if not valid_images(__lowerCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) _lowerCamelCase : Any = make_batched(__lowerCAmelCase ) _lowerCamelCase : Tuple = [ [ self._preprocess_image( image=__lowerCAmelCase , do_resize=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , do_center_crop=__lowerCAmelCase , crop_size=__lowerCAmelCase , do_rescale=__lowerCAmelCase , rescale_factor=__lowerCAmelCase , do_normalize=__lowerCAmelCase , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase , data_format=__lowerCAmelCase , ) for img in video ] for video in videos ] _lowerCamelCase : List[str] = {'''pixel_values''': videos} return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" def snake_case_ ( A_ : int, A_ : int ): '''simple docstring''' if b == 0: return 1 if (b % 2) == 0: return actual_power(A_, int(b / 2 ) ) * actual_power(A_, int(b / 2 ) ) else: return a * actual_power(A_, int(b / 2 ) ) * actual_power(A_, int(b / 2 ) ) def snake_case_ ( A_ : int, A_ : int ): '''simple docstring''' if b < 0: return 1 / actual_power(A_, A_ ) return actual_power(A_, A_ ) if __name__ == "__main__": print(power(-2, -3))
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def snake_case_ ( A_ : Any, A_ : Union[str, Any], A_ : List[str], A_ : Optional[int], A_ : Dict ): '''simple docstring''' for attribute in key.split('''.''' ): _lowerCamelCase : List[Any] = getattr(A_, A_ ) if weight_type is not None: _lowerCamelCase : int = getattr(A_, A_ ).shape else: _lowerCamelCase : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _lowerCamelCase : Optional[int] = value elif weight_type == "weight_g": _lowerCamelCase : Optional[int] = value elif weight_type == "weight_v": _lowerCamelCase : Union[str, Any] = value elif weight_type == "bias": _lowerCamelCase : List[Any] = value else: _lowerCamelCase : Dict = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def snake_case_ ( A_ : Optional[int], A_ : int, A_ : Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : Dict = fairseq_model.state_dict() _lowerCamelCase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : List[Any] = False if "conv_layers" in name: load_conv_layer( A_, A_, A_, A_, hf_model.config.feat_extract_norm == '''group''', ) _lowerCamelCase : Optional[int] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : List[Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): _lowerCamelCase : str = True if "*" in mapped_key: _lowerCamelCase : int = name.split(A_ )[0].split('''.''' )[-2] _lowerCamelCase : Tuple = mapped_key.replace('''*''', A_ ) if "weight_g" in name: _lowerCamelCase : List[Any] = '''weight_g''' elif "weight_v" in name: _lowerCamelCase : Tuple = '''weight_v''' elif "weight" in name: _lowerCamelCase : Optional[int] = '''weight''' elif "bias" in name: _lowerCamelCase : Union[str, Any] = '''bias''' else: _lowerCamelCase : Tuple = None set_recursively(A_, A_, A_, A_, A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def snake_case_ ( A_ : Any, A_ : int, A_ : List[Any], A_ : List[str], A_ : Dict ): '''simple docstring''' _lowerCamelCase : Optional[int] = full_name.split('''conv_layers.''' )[-1] _lowerCamelCase : Optional[Any] = name.split('''.''' ) _lowerCamelCase : int = int(items[0] ) _lowerCamelCase : int = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _lowerCamelCase : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _lowerCamelCase : Optional[int] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _lowerCamelCase : Any = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _lowerCamelCase : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A_ ) @torch.no_grad() def snake_case_ ( A_ : Tuple, A_ : str, A_ : Tuple=None, A_ : List[Any]=None, A_ : Optional[int]=True ): '''simple docstring''' if config_path is not None: _lowerCamelCase : Optional[Any] = HubertConfig.from_pretrained(A_ ) else: _lowerCamelCase : Union[str, Any] = HubertConfig() if is_finetuned: if dict_path: _lowerCamelCase : int = Dictionary.load(A_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : List[str] = target_dict.pad_index _lowerCamelCase : Optional[int] = target_dict.bos_index _lowerCamelCase : List[Any] = target_dict.eos_index _lowerCamelCase : Tuple = len(target_dict.symbols ) _lowerCamelCase : Optional[Any] = os.path.join(A_, '''vocab.json''' ) if not os.path.isdir(A_ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A_ ) ) return os.makedirs(A_, exist_ok=A_ ) with open(A_, '''w''', encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices, A_ ) _lowerCamelCase : List[Any] = WavaVecaCTCTokenizer( A_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=A_, ) _lowerCamelCase : Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False _lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=A_, return_attention_mask=A_, ) _lowerCamelCase : Optional[Any] = WavaVecaProcessor(feature_extractor=A_, tokenizer=A_ ) processor.save_pretrained(A_ ) _lowerCamelCase : Dict = HubertForCTC(A_ ) else: _lowerCamelCase : Tuple = HubertModel(A_ ) if is_finetuned: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowerCamelCase : Any = model[0].eval() recursively_load_weights(A_, A_, A_ ) hf_wavavec.save_pretrained(A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCAmelCase__ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, 
args.dict_path, not args.not_finetuned )
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = 
do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
72
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def snake_case_ ( A_ : Optional[int]=None ): '''simple docstring''' if subparsers is not None: _lowerCamelCase : Tuple = subparsers.add_parser('''test''' ) else: _lowerCamelCase : List[str] = argparse.ArgumentParser('''Accelerate test command''' ) parser.add_argument( '''--config_file''', default=A_, help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ), ) if subparsers is not None: parser.set_defaults(func=A_ ) return parser def snake_case_ ( A_ : List[Any] ): '''simple docstring''' _lowerCamelCase : int = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] ) if args.config_file is None: _lowerCamelCase : Tuple = script_name else: _lowerCamelCase : List[Any] = F'''--config_file={args.config_file} {script_name}''' _lowerCamelCase : Tuple = ['''accelerate-launch'''] + test_args.split() _lowerCamelCase : List[Any] = execute_subprocess_async(A_, env=os.environ.copy() ) if result.returncode == 0: print('''Test is a success! You are ready for your distributed training!''' ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = test_command_parser() _lowerCamelCase : List[Any] = parser.parse_args() test_command(A_ ) if __name__ == "__main__": main()
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
"""simple docstring""" from math import factorial lowerCAmelCase__ = {str(d): factorial(d) for d in range(10)} def snake_case_ ( A_ : int ): '''simple docstring''' return sum(DIGIT_FACTORIAL[d] for d in str(A_ ) ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Optional[Any] = 7 * factorial(9 ) + 1 return sum(i for i in range(3, A_ ) if sum_of_digit_factorial(A_ ) == i ) if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase__ = '''pt''' elif is_tf_available(): lowerCAmelCase__ = '''tf''' else: lowerCAmelCase__ = '''jax''' class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Union[str, Any] = PerceiverTokenizer snake_case__ : Union[str, Any] = False def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" super().setUp() _lowerCamelCase : Any = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def SCREAMING_SNAKE_CASE ( self : int , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Tuple=2_0 , __lowerCAmelCase : List[str]=5 ): """simple docstring""" _lowerCamelCase : Optional[int] = [] for i in range(len(__lowerCAmelCase ) ): try: _lowerCamelCase : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowerCamelCase : Any = list(filter(lambda __lowerCAmelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowerCAmelCase ) ) _lowerCamelCase : Optional[Any] = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) ) if max_length is not None and len(__lowerCAmelCase ) > max_length: _lowerCamelCase : Optional[Any] = toks[:max_length] if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0: while len(__lowerCAmelCase ) < min_length: _lowerCamelCase : Tuple = toks + toks # toks_str = [t[1] for t in toks] _lowerCamelCase : List[Any] = [t[0] for t in toks] # Ensure consistency _lowerCamelCase : Any = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) if " " not in output_txt and len(__lowerCAmelCase ) > 1: _lowerCamelCase : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase ) ) if with_prefix_space: _lowerCamelCase : Any = ''' ''' + output_txt _lowerCamelCase : int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) return output_txt, output_ids def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Tuple = self.perceiver_tokenizer _lowerCamelCase : Optional[Any] = '''Unicode €.''' _lowerCamelCase : str = tokenizer(__lowerCAmelCase ) _lowerCamelCase : List[Any] = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['''input_ids'''] , __lowerCAmelCase ) # decoding _lowerCamelCase : Union[str, Any] = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , '''[CLS]Unicode €.[SEP]''' ) _lowerCamelCase : Union[str, Any] = tokenizer('''e è é ê ë''' ) _lowerCamelCase : int = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 
2_0_1, 1_7_7, 5] self.assertEqual(encoded['''input_ids'''] , __lowerCAmelCase ) # decoding _lowerCamelCase : List[Any] = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : List[str] = self.perceiver_tokenizer _lowerCamelCase : List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off _lowerCamelCase : Dict = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on _lowerCamelCase : Optional[Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) if FRAMEWORK != "jax": _lowerCamelCase : str = list(batch.input_ids.numpy()[0] ) else: _lowerCamelCase : Any = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.perceiver_tokenizer _lowerCamelCase : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _lowerCamelCase : Union[str, Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , __lowerCAmelCase ) self.assertIn('''attention_mask''' , __lowerCAmelCase ) self.assertNotIn('''decoder_input_ids''' , __lowerCAmelCase ) self.assertNotIn('''decoder_attention_mask''' , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.perceiver_tokenizer _lowerCamelCase : str = [ '''Summary of the text.''', '''Another summary.''', ] _lowerCamelCase : int = tokenizer( text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) self.assertEqual(3_2 , targets['''input_ids'''].shape[1] ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _lowerCamelCase : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowerCamelCase : Optional[int] = tempfile.mkdtemp() _lowerCamelCase : Optional[int] = ''' He is very happy, UNwant\u00E9d,running''' _lowerCamelCase : Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer.__class__.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : List[str] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) 
shutil.rmtree(__lowerCAmelCase ) _lowerCamelCase : str = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowerCamelCase : List[Any] = tempfile.mkdtemp() _lowerCamelCase : Optional[Any] = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) _lowerCamelCase : Tuple = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) _lowerCamelCase : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Dict = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _lowerCamelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: _lowerCamelCase : Union[str, Any] = json.load(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: _lowerCamelCase : Optional[int] = json.load(__lowerCAmelCase ) _lowerCamelCase : str = [f'''<extra_id_{i}>''' for i in range(1_2_5 )] _lowerCamelCase : Optional[int] = added_tokens_extra_ids + [ '''an_additional_special_token''' ] _lowerCamelCase : Any = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__lowerCAmelCase , __lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__lowerCAmelCase , __lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowerCamelCase : Dict = tokenizer_class.from_pretrained( __lowerCAmelCase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowerCamelCase : Union[str, Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowerCAmelCase )] _lowerCamelCase : int = tokenizer_class.from_pretrained( __lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Optional[int] = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _lowerCamelCase : Any = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] _lowerCamelCase : Dict = tokenizer.convert_tokens_to_string(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
72
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { '''configuration_efficientformer''': [ '''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientFormerConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''EfficientFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientFormerForImageClassification''', '''EfficientFormerForImageClassificationWithTeacher''', '''EfficientFormerModel''', '''EfficientFormerPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFEfficientFormerForImageClassification''', '''TFEfficientFormerForImageClassificationWithTeacher''', '''TFEfficientFormerModel''', '''TFEfficientFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
1
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowerCAmelCase__ = get_logger(__name__) def snake_case_ ( A_ : Dict, A_ : Optional[Any], A_ : List[str], A_ : Tuple, A_ : List[Any]=0 ): '''simple docstring''' os.makedirs(A_, exist_ok=A_ ) with FSDP.state_dict_type( A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): _lowerCamelCase : Any = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: _lowerCamelCase : Union[str, Any] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' _lowerCamelCase : Tuple = os.path.join(A_, A_ ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(A_, A_ ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: _lowerCamelCase : Optional[Any] = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) _lowerCamelCase : int = os.path.join(A_, A_ ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(A_, A_ ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: _lowerCamelCase : List[Any] = os.path.join(A_, F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(A_, exist_ok=A_ ) logger.info(F'''Saving model to {ckpt_dir}''' ) _lowerCamelCase : Tuple = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=A_, storage_writer=dist_cp.FileSystemWriter(A_ ), planner=DefaultSavePlanner(), ) logger.info(F'''Model saved to {ckpt_dir}''' ) def snake_case_ ( A_ : Optional[int], A_ : Union[str, Any], A_ : List[Any], A_ : Union[str, Any], A_ : Optional[int]=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(A_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return _lowerCamelCase : int = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' _lowerCamelCase : List[Any] = os.path.join(A_, A_ ) logger.info(F'''Loading model from {input_model_file}''' ) _lowerCamelCase : Optional[int] = torch.load(A_ ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: _lowerCamelCase : Any = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) _lowerCamelCase : Tuple = os.path.join(A_, A_ ) 
logger.info(F'''Loading model from {input_model_file}''' ) _lowerCamelCase : Any = torch.load(A_ ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: _lowerCamelCase : List[str] = ( os.path.join(A_, F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) _lowerCamelCase : Tuple = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=A_, storage_reader=dist_cp.FileSystemReader(A_ ), planner=DefaultLoadPlanner(), ) _lowerCamelCase : Optional[int] = state_dict['''model'''] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(A_ ) def snake_case_ ( A_ : Any, A_ : Union[str, Any], A_ : int, A_ : Optional[Any], A_ : int, A_ : Tuple=0 ): '''simple docstring''' os.makedirs(A_, exist_ok=A_ ) with FSDP.state_dict_type( A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): _lowerCamelCase : int = FSDP.optim_state_dict(A_, A_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: _lowerCamelCase : Any = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) _lowerCamelCase : List[Any] = os.path.join(A_, A_ ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(A_, A_ ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: _lowerCamelCase : Dict = os.path.join(A_, F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(A_, exist_ok=A_ ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(A_ ), planner=DefaultSavePlanner(), ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def snake_case_ ( A_ : Tuple, A_ : Dict, A_ : Any, A_ : Optional[int], A_ : List[str], A_ : List[Any]=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: _lowerCamelCase : Any = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: _lowerCamelCase : Dict = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) _lowerCamelCase : Any = os.path.join(A_, A_ ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) _lowerCamelCase : List[Any] = torch.load(A_ ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: _lowerCamelCase : List[str] = ( os.path.join(A_, F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) _lowerCamelCase : int = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(A_ ), ) _lowerCamelCase : List[str] = optim_state['''optimizer'''] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) _lowerCamelCase : List[str] = FSDP.optim_state_dict_to_load(A_, A_, A_ ) optimizer.load_state_dict(A_ )
72
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
1
"""simple docstring""" import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch lowerCAmelCase__ = '''sshleifer/bart-tiny-random''' lowerCAmelCase__ = '''patrickvonplaten/t5-tiny-random''' @require_torch class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return AutoConfig.from_pretrained(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : List[Any] = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : Optional[int] = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : List[str] = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : Any = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" with self.assertRaises(__lowerCAmelCase ): create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=__lowerCAmelCase , d=__lowerCAmelCase )
72
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
1
"""simple docstring""" lowerCAmelCase__ = 65521 def snake_case_ ( A_ : str ): '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : Union[str, Any] = 0 for plain_chr in plain_text: _lowerCamelCase : Tuple = (a + ord(A_ )) % MOD_ADLER _lowerCamelCase : List[str] = (b + a) % MOD_ADLER return (b << 16) | a
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase__ = 250004 lowerCAmelCase__ = 250020 @require_sentencepiece @require_tokenizers class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : List[str] = MBartaaTokenizer snake_case__ : Tuple = MBartaaTokenizerFast snake_case__ : Any = True snake_case__ : Optional[Any] = True def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : str = MBartaaTokenizer(__lowerCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = '''<s>''' _lowerCamelCase : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__lowerCAmelCase ) , 1_0_5_4 ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = MBartaaTokenizer(__lowerCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) _lowerCamelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) _lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) _lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', 
SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Any = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast 
versions return _lowerCamelCase : Dict = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : int = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _lowerCamelCase : List[str] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : int = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Tuple = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase ) # Save tokenizer rust, legacy_format=True _lowerCamelCase : int = tempfile.mkdtemp() _lowerCamelCase : List[str] = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase ) _lowerCamelCase : Tuple = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : List[Any] = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) # Save tokenizer rust, legacy_format=False _lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCamelCase : Optional[Any] = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCamelCase : Dict = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Dict = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase): snake_case__ : int = "facebook/mbart-large-50-one-to-many-mmt" snake_case__ : Optional[int] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly 
five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] snake_case__ : Optional[Any] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] snake_case__ : int = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict ): """simple docstring""" _lowerCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _lowerCamelCase : Any = 1 return cls def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids ) _lowerCamelCase : Optional[Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2] _lowerCamelCase : Optional[Any] = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['''this is gunna be a long sentence ''' * 2_0] assert isinstance(src_text[0] , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = 1_0 _lowerCamelCase : Any = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0] self.assertEqual(ids[0] , __lowerCAmelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : str = tempfile.mkdtemp() _lowerCamelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = MBartaaTokenizer.from_pretrained(__lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase ) @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors='''pt''' ) _lowerCamelCase : Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: 
https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _lowerCamelCase : Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual((2, 1_4) , batch.input_ids.shape ) self.assertEqual((2, 1_4) , batch.attention_mask.shape ) _lowerCamelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors='''pt''' ) _lowerCamelCase : Optional[Any] = self.tokenizer( text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0 , return_tensors='''pt''' ) _lowerCamelCase : List[Any] = targets['''input_ids'''] _lowerCamelCase : int = shift_tokens_right(__lowerCAmelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , { # en_XX, A, test, EOS '''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_0_0_0_1, } , )
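For reference, the non-test shape of the translation setup these assertions describe, written with the real transformers class names that the dataset renaming hides; this mirrors the published usage for the one-to-many mBART-50 checkpoint and is a sketch rather than part of the test:

from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

tokenizer = MBart50TokenizerFast.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")

batch = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# The one-to-many model expects the target language code as the first generated token.
generated = model.generate(**batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))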
72
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
1
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase 
: Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
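Outside a test harness, the iterator-streaming pattern exercised above usually looks like this; the tiny random checkpoint is kept only because it downloads quickly, so the generated text will be gibberish:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tok(["An input string"], return_tensors="pt")
streamer = TextIteratorStreamer(tok)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=10, do_sample=False)

# generate() blocks, so it runs on a worker thread while the main thread consumes the stream.
Thread(target=model.generate, kwargs=generation_kwargs).start()
for new_text in streamer:
    print(new_text, end="")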
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class __snake_case ( _lowercase): def __init__( self : str ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Optional[int] = False while not completed: if counter == 1: self.reset() _lowerCamelCase : Optional[int] = self.advance() if not self.does_advance(__lowerCAmelCase ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.update(__lowerCAmelCase ) counter += 1 if counter > 1_0_0_0_0: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Tuple=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class __snake_case ( _lowercase): def __init__( self : Optional[int] , __lowerCAmelCase : List[int] ): """simple docstring""" super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) _lowerCamelCase : Union[str, Any] = token_ids _lowerCamelCase : Dict = len(self.token_ids ) _lowerCamelCase : List[str] = -1 # the index of the currently fulfilled step _lowerCamelCase : List[Any] = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _lowerCamelCase : List[str] = False _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Optional[int] = False if self.does_advance(__lowerCAmelCase ): self.fulfilled_idx += 1 _lowerCamelCase : List[Any] = True if self.fulfilled_idx == (self.seqlen - 1): _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = completed else: # failed to make progress. 
_lowerCamelCase : str = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : str = False _lowerCamelCase : Tuple = 0 def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict=False ): """simple docstring""" _lowerCamelCase : Optional[int] = PhrasalConstraint(self.token_ids ) if stateful: _lowerCamelCase : Any = self.seqlen _lowerCamelCase : List[Any] = self.fulfilled_idx _lowerCamelCase : Dict = self.completed return new_constraint class __snake_case : def __init__( self : Dict , __lowerCAmelCase : List[List[int]] , __lowerCAmelCase : Dict=True ): """simple docstring""" _lowerCamelCase : Any = max([len(__lowerCAmelCase ) for one in nested_token_ids] ) _lowerCamelCase : Any = {} for token_ids in nested_token_ids: _lowerCamelCase : Dict = root for tidx, token_id in enumerate(__lowerCAmelCase ): if token_id not in level: _lowerCamelCase : List[Any] = {} _lowerCamelCase : Union[str, Any] = level[token_id] if no_subsets and self.has_subsets(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' f''' {nested_token_ids}.''' ) _lowerCamelCase : Dict = root def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : List[Any] = self.trie for current_token in current_seq: _lowerCamelCase : str = start[current_token] _lowerCamelCase : Any = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Dict = self.next_tokens(__lowerCAmelCase ) return len(__lowerCAmelCase ) == 0 def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Dict = list(root.values() ) if len(__lowerCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(__lowerCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.count_leaves(__lowerCAmelCase ) return len(__lowerCAmelCase ) != leaf_count class __snake_case ( _lowercase): def __init__( self : Dict , __lowerCAmelCase : List[List[int]] ): """simple docstring""" super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) _lowerCamelCase : str = DisjunctiveTrie(__lowerCAmelCase ) _lowerCamelCase : Any = nested_token_ids _lowerCamelCase : List[Any] = self.trie.max_height _lowerCamelCase : Optional[int] = [] _lowerCamelCase : List[str] = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Any = self.trie.next_tokens(self.current_seq ) if len(__lowerCAmelCase ) == 0: return None else: 
return token_list def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _lowerCamelCase : int = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _lowerCamelCase : int = False _lowerCamelCase : str = False _lowerCamelCase : Optional[Any] = False if self.does_advance(__lowerCAmelCase ): self.current_seq.append(__lowerCAmelCase ) _lowerCamelCase : List[str] = True else: _lowerCamelCase : List[Any] = True self.reset() _lowerCamelCase : List[str] = self.trie.reached_leaf(self.current_seq ) _lowerCamelCase : int = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Dict = False _lowerCamelCase : List[str] = [] def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str=False ): """simple docstring""" _lowerCamelCase : Optional[Any] = DisjunctiveConstraint(self.token_ids ) if stateful: _lowerCamelCase : int = self.seqlen _lowerCamelCase : List[str] = self.current_seq _lowerCamelCase : Union[str, Any] = self.completed return new_constraint class __snake_case : def __init__( self : str , __lowerCAmelCase : List[Constraint] ): """simple docstring""" _lowerCamelCase : Any = constraints # max # of steps required to fulfill a given constraint _lowerCamelCase : str = max([c.seqlen for c in constraints] ) _lowerCamelCase : List[Any] = len(__lowerCAmelCase ) _lowerCamelCase : str = False self.init_state() def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = None _lowerCamelCase : Any = [constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" _lowerCamelCase : Union[str, Any] = constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) else: _lowerCamelCase : Any = self.inprogress_constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[List[int]] ): """simple docstring""" 
self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint _lowerCamelCase , _lowerCamelCase : str = self.add(__lowerCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) _lowerCamelCase , _lowerCamelCase : List[str] = False, False if self.completed: _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = self.inprogress_constraint.update(__lowerCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) ) _lowerCamelCase : List[str] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) _lowerCamelCase : str = None if len(self.pending_constraints ) == 0: # we're done! _lowerCamelCase : Any = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__lowerCAmelCase ): _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = pending_constraint.update(__lowerCAmelCase ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = None if not complete and stepped: _lowerCamelCase : Union[str, Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". _lowerCamelCase : Union[str, Any] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. _lowerCamelCase : List[Any] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict=True ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. 
if stateful: _lowerCamelCase : Optional[Any] = [ constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: _lowerCamelCase : List[str] = self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = [constraint.copy() for constraint in self.pending_constraints] return new_state
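# A hedged usage sketch for the constraint classes defined above. Recent transformers
# releases expose them publicly as `PhrasalConstraint` and `DisjunctiveConstraint` and
# consume them through constrained beam search (`model.generate(constraints=...)`).
# The checkpoint, prompt and forced phrases below are illustrative assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer, DisjunctiveConstraint, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Force one exact phrase to appear in the generated continuation ...
phrase_ids = tokenizer(" very happy", add_special_tokens=False).input_ids
# ... and force at least one alternative from a set of phrases.
alternative_ids = tokenizer([" quickly", " slowly"], add_special_tokens=False).input_ids

constraints = [PhrasalConstraint(phrase_ids), DisjunctiveConstraint(alternative_ids)]

inputs = tokenizer("The dog was", return_tensors="pt")
# Constrained generation requires beam search (num_beams > 1) and no sampling.
outputs = model.generate(**inputs, constraints=constraints, num_beams=5, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))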
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowerCAmelCase__ = logging.getLogger() def snake_case_ ( A_ : Path, A_ : list ): '''simple docstring''' _lowerCamelCase : int = '''\n'''.join(A_ ) Path(A_ ).open('''w''' ).writelines(A_ ) lowerCAmelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCAmelCase__ = '''sshleifer/bart-tiny-random''' lowerCAmelCase__ = '''sshleifer/tiny-mbart''' lowerCAmelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class __snake_case ( _lowercase): def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _lowerCamelCase : Tuple = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _lowerCamelCase : Optional[int] = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Any = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) _lowerCamelCase : List[str] = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _lowerCamelCase : Optional[Any] = f''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): run_generate() assert Path(__lowerCAmelCase ).exists() # os.remove(Path(output_file_name)) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" self.run_eval_tester(__lowerCAmelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[int] ): """simple docstring""" self.run_eval_tester(__lowerCAmelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _lowerCamelCase : List[str] = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _lowerCamelCase : List[Any] = { '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } _lowerCamelCase : Dict = Path(self.get_auto_remove_tmp_dir() ) _lowerCamelCase : List[str] = str(tmp_dir / '''scores.json''' ) _lowerCamelCase : Union[str, Any] = str(tmp_dir / '''val.target''' ) _dump_articles(__lowerCAmelCase , text['''en'''] ) _dump_articles(__lowerCAmelCase , text['''de'''] ) _lowerCamelCase : Union[str, Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _lowerCamelCase : str = f''' run_eval_search.py {model} {str(__lowerCAmelCase )} {str(__lowerCAmelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() 
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): with CaptureStdout() as cs: run_search() _lowerCamelCase : Optional[Any] = [''' num_beams | length_penalty''', model, '''Best score args'''] _lowerCamelCase : Optional[Any] = ['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(__lowerCAmelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(__lowerCAmelCase ).exists() os.remove(Path(__lowerCAmelCase ) )
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
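# A hedged sketch of using the helper exercised above on a local pipeline snapshot:
# collect the relative file paths of a downloaded pipeline and ask whether a pure
# safetensors load is possible. The snapshot path and variant are assumptions.
import os

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


def list_pipeline_files(root: str) -> list:
    """Return paths relative to `root`, e.g. 'unet/diffusion_pytorch_model.bin'."""
    filenames = []
    for dirpath, _, files in os.walk(root):
        for name in files:
            filenames.append(os.path.relpath(os.path.join(dirpath, name), root))
    return filenames


local_snapshot = "./stable-diffusion-v1-5"  # hypothetical local download
filenames = list_pipeline_files(local_snapshot)
print(is_safetensors_compatible(filenames))
print(is_safetensors_compatible(filenames, variant="fp16"))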
"""simple docstring""" from math import factorial def snake_case_ ( A_ : int = 1_00 ): '''simple docstring''' return sum(map(A_, str(factorial(A_ ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , 
len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : 
Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : 
List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) _lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] 
, __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, 
-4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
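# A hedged end-to-end sketch of the model exercised above: run inference on a single
# image and post-process the per-query logits into a semantic segmentation map.
# The checkpoint and image path are assumptions taken from the integration tests.
import torch
from PIL import Image

from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Collapse the per-query mask/class logits into a (height, width) label map.
semantic_map = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(semantic_map.shape)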
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : List[Any]=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=5_1_2 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[Any]="last" , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : str=0 , ): """simple docstring""" _lowerCamelCase : Optional[Any] = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Any = seq_length _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_input_lengths _lowerCamelCase : Optional[int] = use_token_type_ids _lowerCamelCase : Dict = use_labels _lowerCamelCase : Optional[Any] = gelu_activation _lowerCamelCase : List[Any] = sinusoidal_embeddings _lowerCamelCase : List[Any] = causal _lowerCamelCase : Any = asm _lowerCamelCase : int = n_langs _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : str = n_special _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Dict = max_position_embeddings _lowerCamelCase : Tuple = type_sequence_label_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[Any] = num_labels _lowerCamelCase : Dict = num_choices _lowerCamelCase : Tuple = summary_type _lowerCamelCase : List[Any] = use_proj _lowerCamelCase : Tuple = scope _lowerCamelCase : Union[str, Any] = bos_token_id def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Dict = None if self.use_input_lengths: _lowerCamelCase : Tuple = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _lowerCamelCase : Any = None if self.use_token_type_ids: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _lowerCamelCase : Union[str, Any] 
= None _lowerCamelCase : List[Any] = None _lowerCamelCase : Optional[int] = None if self.use_labels: _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : Any = ids_tensor([self.batch_size] , 2 ).float() _lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : List[Any] = XLMModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : List[Any] = model(__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : List[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : str = XLMWithLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , ): """simple docstring""" _lowerCamelCase : Any = XLMForQuestionAnsweringSimple(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) _lowerCamelCase : List[str] = outputs self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , ): """simple docstring""" _lowerCamelCase : Optional[Any] = XLMForQuestionAnswering(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) _lowerCamelCase : Dict = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , ) _lowerCamelCase : Dict = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , ) ((_lowerCamelCase) , ) : List[str] = result_with_labels.to_tuple() _lowerCamelCase : str = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) ((_lowerCamelCase) , ) : Optional[int] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : List[str] = XLMForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Dict = model(__lowerCAmelCase ) _lowerCamelCase : str = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , ): """simple docstring""" _lowerCamelCase : str = self.num_labels _lowerCamelCase : int = XLMForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , 
__lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.num_choices _lowerCamelCase : List[str] = XLMForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : Tuple = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Optional[Any] = config_and_inputs _lowerCamelCase : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class __snake_case ( _lowercase , _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : Tuple = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable snake_case__ : Tuple = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str=False ): """simple docstring""" _lowerCamelCase : int = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) _lowerCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = XLMModelTester(self ) _lowerCamelCase : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Tuple=1 ): """simple docstring""" self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual( [isinstance(__lowerCAmelCase , __lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(__lowerCAmelCase ) ) self.assertEqual(len(__lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__lowerCAmelCase ): # adds PAD dummy token _lowerCamelCase : Optional[Any] = min_length + idx + 1 _lowerCamelCase : Optional[Any] = min_length + idx + 1 _lowerCamelCase : Dict = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( 
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : str=False , __lowerCAmelCase : Tuple=1 ): """simple docstring""" self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual( [isinstance(__lowerCAmelCase , __lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__lowerCAmelCase ) , ) self.assertEqual(len(__lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__lowerCAmelCase ): # adds PAD dummy token _lowerCamelCase : str = min_length + idx + 1 _lowerCamelCase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__lowerCAmelCase ) , ) pass @slow def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = XLMModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Any = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(__lowerCAmelCase ) _lowerCamelCase : Tuple = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=__lowerCAmelCase ) # the president _lowerCamelCase : List[Any] = [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __lowerCAmelCase )
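# Hedged usage sketch (not part of the test suite above): loading the public
# "xlm-mlm-en-2048" checkpoint that the slow integration test exercises and
# running greedy generation. The tokenizer class and inference flow are plain
# transformers usage; as the test itself notes, generation quality from this
# checkpoint may be poor.
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

xlm_tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
xlm_model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048").eval()

prompt = xlm_tokenizer("the president", return_tensors="pt")
with torch.no_grad():
    generated = xlm_model.generate(**prompt, do_sample=False, max_new_tokens=10)
print(xlm_tokenizer.decode(generated[0]))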
72
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
1
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(_lowercase) class __snake_case ( _lowercase): def __init__( self : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__lowerCAmelCase ) def __call__( self : Tuple , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : str ): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int , **__lowerCAmelCase : Any ): """simple docstring""" return {}, {}, {} def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = load_image(__lowerCAmelCase ) _lowerCamelCase : List[str] = image.size _lowerCamelCase : Dict = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : int = self.model(**__lowerCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : List[str] = model_outputs.predicted_depth _lowerCamelCase : Dict = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__lowerCAmelCase ) _lowerCamelCase : List[str] = prediction.squeeze().cpu().numpy() _lowerCamelCase : Optional[int] = (output * 2_5_5 / np.max(__lowerCAmelCase )).astype('''uint8''' ) _lowerCamelCase : Tuple = Image.fromarray(__lowerCAmelCase ) _lowerCamelCase : Any = {} _lowerCamelCase : Union[str, Any] = predicted_depth _lowerCamelCase : Dict = depth return output_dict
72
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
1
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def snake_case_ ( A_ : str = "isbn/0140328726" ): '''simple docstring''' _lowerCamelCase : List[str] = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: _lowerCamelCase : Tuple = F'''{olid} is not a valid Open Library olid''' raise ValueError(A_ ) return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json() def snake_case_ ( A_ : dict ): '''simple docstring''' _lowerCamelCase : List[str] = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } _lowerCamelCase : Union[str, Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} _lowerCamelCase : Dict = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] _lowerCamelCase : List[str] = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(A_, A_ ): _lowerCamelCase : Any = ''', '''.join(A_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: lowerCAmelCase__ = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
72
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class __snake_case ( _lowercase): snake_case__ : Tuple = "luke" def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=5_0_2_6_7 , __lowerCAmelCase : Any=5_0_0_0_0_0 , __lowerCAmelCase : Optional[int]=7_6_8 , __lowerCAmelCase : Tuple=2_5_6 , __lowerCAmelCase : str=1_2 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[Any]=5_1_2 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Union[str, Any]=2 , **__lowerCAmelCase : List[str] , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Any = entity_vocab_size _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Optional[int] = entity_emb_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : List[Any] = num_attention_heads _lowerCamelCase : Any = hidden_act _lowerCamelCase : Dict = intermediate_size _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : int = attention_probs_dropout_prob _lowerCamelCase : int = max_position_embeddings _lowerCamelCase : Optional[int] = type_vocab_size _lowerCamelCase : Union[str, Any] = initializer_range _lowerCamelCase : Optional[Any] = layer_norm_eps _lowerCamelCase : int = use_entity_aware_attention _lowerCamelCase : List[str] = classifier_dropout
72
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase 
: Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return 
super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
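# Hedged usage sketch of the fast BART tokenizer above; the checkpoint name is
# taken from the pretrained-vocab map in this file.
from transformers import BartTokenizerFast

bart_tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
encoded = bart_tokenizer("Hello world", return_tensors="pt")
print(encoded["input_ids"], encoded["attention_mask"])
print(bart_tokenizer.decode(encoded["input_ids"][0]))  # '<s>Hello world</s>'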
72
1
"""simple docstring""" import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class __snake_case : snake_case__ : Optional[int] = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Overwrite the cached preprocessed datasets or not."}) snake_case__ : bool = field( default=_lowercase , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) snake_case__ : Optional[int] = field( default=_lowercase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) snake_case__ : Optional[int] = field( default=_lowercase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) snake_case__ : Optional[int] = field( default=_lowercase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) @dataclass class __snake_case : snake_case__ : str = field( default=_lowercase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) snake_case__ : str = field( default=_lowercase , metadata={"help": "Evaluation language. 
Also train language if `train_language` is set to None."}) snake_case__ : Optional[str] = field( default=_lowercase , metadata={"help": "Train language if it is different from the evaluation language."}) snake_case__ : Optional[str] = field( default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"}) snake_case__ : Optional[str] = field( default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) snake_case__ : Optional[str] = field( default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) snake_case__ : Optional[bool] = field( default=_lowercase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , ) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) snake_case__ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) snake_case__ : bool = field( default=_lowercase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_xnli''', A_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(A_ ) datasets.utils.logging.set_verbosity(A_ ) transformers.utils.logging.set_verbosity(A_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _lowerCamelCase : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: _lowerCamelCase : Tuple = load_dataset( '''xnli''', model_args.language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: _lowerCamelCase : Dict = load_dataset( '''xnli''', model_args.train_language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) _lowerCamelCase : Optional[Any] = train_dataset.features['''label'''].names if training_args.do_eval: _lowerCamelCase : int = load_dataset( '''xnli''', model_args.language, split='''validation''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) _lowerCamelCase : Optional[Any] = eval_dataset.features['''label'''].names if training_args.do_predict: _lowerCamelCase : Optional[Any] = load_dataset( '''xnli''', model_args.language, split='''test''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) _lowerCamelCase : List[str] = predict_dataset.features['''label'''].names # Labels _lowerCamelCase : str = len(A_ ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCamelCase : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=A_, idalabel={str(A_ ): label for i, label in enumerate(A_ )}, labelaid={label: i for i, label in enumerate(A_ )}, finetuning_task='''xnli''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _lowerCamelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _lowerCamelCase : Dict = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=A_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: _lowerCamelCase : Dict = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _lowerCamelCase : Optional[int] = False def preprocess_function(A_ : List[str] ): # Tokenize the texts return tokenizer( examples['''premise'''], examples['''hypothesis'''], padding=A_, max_length=data_args.max_seq_length, truncation=A_, ) if training_args.do_train: if data_args.max_train_samples is not None: _lowerCamelCase : List[Any] = min(len(A_ ), data_args.max_train_samples ) _lowerCamelCase : List[Any] = train_dataset.select(range(A_ ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): _lowerCamelCase : List[str] = train_dataset.map( A_, batched=A_, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on train dataset''', ) # Log a few random samples from the training set: for index in random.sample(range(len(A_ ) ), 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) if training_args.do_eval: if data_args.max_eval_samples is not None: _lowerCamelCase : str = min(len(A_ ), data_args.max_eval_samples ) _lowerCamelCase : Dict = eval_dataset.select(range(A_ ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): _lowerCamelCase : List[Any] = eval_dataset.map( A_, batched=A_, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) if training_args.do_predict: if data_args.max_predict_samples is not None: _lowerCamelCase : str = min(len(A_ ), data_args.max_predict_samples ) _lowerCamelCase : List[str] = predict_dataset.select(range(A_ ) ) with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ): _lowerCamelCase : List[Any] = predict_dataset.map( A_, batched=A_, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on prediction dataset''', ) # Get the metric function _lowerCamelCase : Optional[Any] = evaluate.load('''xnli''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(A_ : EvalPrediction ): _lowerCamelCase : Tuple = p.predictions[0] if isinstance(p.predictions, A_ ) else p.predictions _lowerCamelCase : Optional[Any] = np.argmax(A_, axis=1 ) return metric.compute(predictions=A_, references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _lowerCamelCase : Optional[Any] = default_data_collator elif training_args.fpaa: _lowerCamelCase : Any = DataCollatorWithPadding(A_, pad_to_multiple_of=8 ) else: _lowerCamelCase : int = None # Initialize our Trainer _lowerCamelCase : int = Trainer( model=A_, args=A_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=A_, tokenizer=A_, data_collator=A_, ) # Training if training_args.do_train: _lowerCamelCase : Optional[int] = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Optional[int] = last_checkpoint _lowerCamelCase : List[Any] = trainer.train(resume_from_checkpoint=A_ ) _lowerCamelCase : int = train_result.metrics _lowerCamelCase : Tuple = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A_ ) ) _lowerCamelCase : Union[str, Any] = min(A_, len(A_ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''', A_ ) trainer.save_metrics('''train''', A_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _lowerCamelCase : Optional[int] = trainer.evaluate(eval_dataset=A_ ) _lowerCamelCase : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A_ ) _lowerCamelCase : Any = min(A_, len(A_ ) ) trainer.log_metrics('''eval''', A_ ) trainer.save_metrics('''eval''', A_ ) # Prediction if training_args.do_predict: logger.info('''*** Predict ***''' ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = trainer.predict(A_, metric_key_prefix='''predict''' ) _lowerCamelCase : Optional[int] = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(A_ ) ) _lowerCamelCase : int = min(A_, len(A_ ) ) trainer.log_metrics('''predict''', A_ ) trainer.save_metrics('''predict''', A_ ) _lowerCamelCase : Dict = np.argmax(A_, axis=1 ) _lowerCamelCase : List[str] = os.path.join(training_args.output_dir, '''predictions.txt''' ) if trainer.is_world_process_zero(): with open(A_, '''w''' ) as writer: writer.write('''index\tprediction\n''' ) for index, item in enumerate(A_ ): _lowerCamelCase : List[Any] = label_list[item] writer.write(F'''{index}\t{item}\n''' ) if __name__ == "__main__": main()
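# Hedged sketch of how the three argument dataclasses above are populated from
# a command line; the checkpoint name and output path are placeholders.
from transformers import HfArgumentParser, TrainingArguments

example_argv = [
    "--model_name_or_path", "bert-base-multilingual-cased",
    "--language", "de",
    "--train_language", "en",
    "--output_dir", "/tmp/xnli_out",
    "--do_train", "--do_eval",
]
arg_parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = arg_parser.parse_args_into_dataclasses(args=example_argv)
print(model_args.language, data_args.max_seq_length, training_args.do_train)  # de 128 True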
72
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
1
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase__ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase__ = importlib.util.spec_from_file_location( '''transformers''', os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) lowerCAmelCase__ = spec.loader.load_module() lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCAmelCase__ = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') lowerCAmelCase__ = { '''CLIPConfigMixin''', '''DecisionTransformerConfigMixin''', '''EncoderDecoderConfigMixin''', '''RagConfigMixin''', '''SpeechEncoderDecoderConfigMixin''', '''VisionEncoderDecoderConfigMixin''', '''VisionTextDualEncoderConfigMixin''', } def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : int = [] for config_class in list(CONFIG_MAPPING.values() ): _lowerCamelCase : Tuple = False # source code of `config_class` _lowerCamelCase : Dict = inspect.getsource(A_ ) _lowerCamelCase : int = _re_checkpoint.findall(A_ ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _lowerCamelCase , _lowerCamelCase : List[Any] = checkpoint # verify the checkpoint name corresponds to the checkpoint link _lowerCamelCase : List[str] = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: _lowerCamelCase : Union[str, Any] = True break _lowerCamelCase : List[Any] = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(A_ ) if len(A_ ) > 0: _lowerCamelCase : Optional[Any] = '''\n'''.join(sorted(A_ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : str = FlaxAutoencoderKL @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Dict = 4 _lowerCamelCase : List[str] = 3 _lowerCamelCase : List[Any] = (3_2, 3_2) _lowerCamelCase : str = jax.random.PRNGKey(0 ) _lowerCamelCase : int = jax.random.uniform(__lowerCAmelCase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = { '''block_out_channels''': [3_2, 6_4], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } _lowerCamelCase : Tuple = self.dummy_input return init_dict, inputs_dict
72
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
1
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase 
: Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
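# Hedged usage sketch of TextIteratorStreamer outside the test harness; the
# tiny checkpoint is the same one the tests above rely on.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

prompt = tok("An increasing sequence: one,", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(**prompt, streamer=streamer, max_new_tokens=20, do_sample=False)

worker = Thread(target=lm.generate, kwargs=generation_kwargs)
worker.start()
for new_text in streamer:  # pieces of text arrive as they are generated
    print(new_text, end="", flush=True)
worker.join()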
72
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = 
do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
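# Hedged sketch of the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the waveform by
# 5 * 2**6 = 320 samples per output frame, i.e. roughly 50 frames per second
# for 16 kHz audio.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
downsample_ratio = functools.reduce(operator.mul, default_conv_stride, 1)
print(downsample_ratio)            # 320
print(16_000 / downsample_ratio)   # 50.0 output frames per second at 16 kHz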
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ ( A_ : Optional[int] ): '''simple docstring''' if isinstance(A_, collections.abc.Iterable ): return x return (x, x) @require_flax class __snake_case : def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float ): """simple docstring""" _lowerCamelCase : List[Any] = np.abs((a - b) ).max() self.assertLessEqual(__lowerCAmelCase , __lowerCAmelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int=None , **__lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = FlaxVisionTextDualEncoderModel(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = {'''vision_model''': vision_model, '''text_model''': text_model} _lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE ( self : 
Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model} _lowerCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) _lowerCamelCase : Dict = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = after_output[0] _lowerCamelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCAmelCase , 1E-3 ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model} _lowerCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase ) _lowerCamelCase : Any = model( input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_attentions=__lowerCAmelCase ) _lowerCamelCase : str = output.vision_model_output.attentions self.assertEqual(len(__lowerCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCamelCase : str = to_atuple(vision_model.config.image_size ) _lowerCamelCase : List[Any] = to_atuple(vision_model.config.patch_size ) _lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _lowerCamelCase : Optional[int] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _lowerCamelCase : Optional[Any] = output.text_model_output.attentions self.assertEqual(len(__lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): """simple docstring""" pt_model.to(__lowerCAmelCase ) pt_model.eval() # prepare inputs _lowerCamelCase : str = inputs_dict _lowerCamelCase : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _lowerCamelCase : List[Any] = pt_model(**__lowerCAmelCase ).to_tuple() _lowerCamelCase : List[Any] = fx_model(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): 
self.assert_almost_equals(__lowerCAmelCase , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase ) _lowerCamelCase : List[Any] = fx_model_loaded(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__lowerCAmelCase , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : List[Any] = VisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase , from_flax=__lowerCAmelCase ) pt_model_loaded.to(__lowerCAmelCase ) pt_model_loaded.eval() with torch.no_grad(): _lowerCamelCase : Dict = pt_model_loaded(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__lowerCAmelCase , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[Any] = VisionTextDualEncoderModel(__lowerCAmelCase ) _lowerCamelCase : int = FlaxVisionTextDualEncoderModel(__lowerCAmelCase ) _lowerCamelCase : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowerCAmelCase ) _lowerCamelCase : Tuple = fx_state self.check_pt_flax_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = VisionTextDualEncoderModel(__lowerCAmelCase ) _lowerCamelCase : str = FlaxVisionTextDualEncoderModel(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase , fx_model.params ) self.check_pt_flax_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : str = self.prepare_config_and_inputs() self.check_save_load(**__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Tuple = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__lowerCAmelCase ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : Optional[Any] = 
config_inputs_dict.pop('''vision_config''' ) _lowerCamelCase : Dict = config_inputs_dict.pop('''text_config''' ) _lowerCamelCase : Tuple = config_inputs_dict self.check_equivalence_pt_to_flax(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.check_equivalence_flax_to_pt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_pretrained_model_and_inputs() _lowerCamelCase : int = model_a(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model_a(**__lowerCAmelCase ) _lowerCamelCase : Tuple = after_outputs[0] _lowerCamelCase : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCAmelCase , 1E-5 ) @require_flax class __snake_case ( _lowercase , unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCAmelCase , text_from_pt=__lowerCAmelCase , ) _lowerCamelCase : Optional[Any] = 1_3 _lowerCamelCase : Any = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _lowerCamelCase : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _lowerCamelCase : Tuple = random_attention_mask([batch_size, 4] ) _lowerCamelCase : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): """simple docstring""" _lowerCamelCase : List[str] = FlaxViTModel(__lowerCAmelCase ) _lowerCamelCase : List[str] = FlaxBertModel(__lowerCAmelCase ) return vision_model, text_model def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = FlaxViTModelTester(self ) _lowerCamelCase : Optional[Any] = FlaxBertModelTester(self ) _lowerCamelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs() _lowerCamelCase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase : List[Any] = vision_config_and_inputs _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __snake_case ( _lowercase , unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCAmelCase , text_from_pt=__lowerCAmelCase , ) _lowerCamelCase : List[str] = 1_3 _lowerCamelCase : Any = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _lowerCamelCase : 
List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _lowerCamelCase : str = random_attention_mask([batch_size, 4] ) _lowerCamelCase : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[str] = FlaxCLIPVisionModel(__lowerCAmelCase ) _lowerCamelCase : str = FlaxBertModel(__lowerCAmelCase ) return vision_model, text_model def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : str = FlaxCLIPVisionModelTester(self ) _lowerCamelCase : str = FlaxBertModelTester(self ) _lowerCamelCase : Dict = clip_model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = bert_model_tester.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase : Optional[int] = vision_config_and_inputs _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) _lowerCamelCase : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _lowerCamelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _lowerCamelCase : Union[str, Any] = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''np''' ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _lowerCamelCase : List[str] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __lowerCAmelCase , atol=1E-3 ) )
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PegasusXForConditionalGeneration''', '''PegasusXModel''', '''PegasusXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
"""simple docstring""" import warnings from .generation import TFGenerationMixin class __snake_case ( _lowercase): # warning at import time warnings.warn( "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will " "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , _lowercase , )
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''snap-research/efficientformer-l1-300''': ( '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : Any = "efficientformer" def __init__( self : List[str] , __lowerCAmelCase : List[int] = [3, 2, 6, 4] , __lowerCAmelCase : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , __lowerCAmelCase : List[bool] = [True, True, True, True] , __lowerCAmelCase : int = 4_4_8 , __lowerCAmelCase : int = 3_2 , __lowerCAmelCase : int = 4 , __lowerCAmelCase : int = 7 , __lowerCAmelCase : int = 5 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : int = 4 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : int = 1_6 , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : str = "gelu" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 1E-12 , __lowerCAmelCase : int = 2_2_4 , __lowerCAmelCase : float = 1E-05 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Dict = hidden_sizes _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : List[str] = layer_norm_eps _lowerCamelCase : int = patch_size _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Optional[int] = depths _lowerCamelCase : Union[str, Any] = mlp_expansion_ratio _lowerCamelCase : Tuple = downsamples _lowerCamelCase : int = dim _lowerCamelCase : Union[str, Any] = key_dim _lowerCamelCase : int = attention_ratio _lowerCamelCase : str = resolution _lowerCamelCase : Dict = pool_size _lowerCamelCase : int = downsample_patch_size _lowerCamelCase : List[Any] = downsample_stride _lowerCamelCase : Optional[Any] = downsample_pad _lowerCamelCase : Any = drop_path_rate _lowerCamelCase : List[str] = num_metaad_blocks _lowerCamelCase : List[Any] = distillation _lowerCamelCase : Union[str, Any] = use_layer_scale _lowerCamelCase : Optional[int] = layer_scale_init_value _lowerCamelCase : Any = image_size _lowerCamelCase : Optional[int] = batch_norm_eps
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys lowerCAmelCase__ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') lowerCAmelCase__ = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() lowerCAmelCase__ = '''|'''.join(sys.argv[1:]) lowerCAmelCase__ = re.compile(RF"""^({joined_dirs}).*?\.py$""") lowerCAmelCase__ = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path lowerCAmelCase__ = '''src/transformers''' # Matches is_xxx_available() lowerCAmelCase__ = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} lowerCAmelCase__ = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase__ = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available lowerCAmelCase__ = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase__ = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase__ = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo lowerCAmelCase__ = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: lowerCAmelCase__ = re.compile(R'''^\s*try:''') # Catches a line with else: lowerCAmelCase__ = re.compile(R'''^\s*else:''') def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None _lowerCamelCase : List[Any] = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' with open(A_, '''r''', encoding='''utf-8''', newline='''\n''' ) as f: _lowerCamelCase : Any = f.readlines() _lowerCamelCase : Tuple = 0 while line_index < len(A_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure _lowerCamelCase : int = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _lowerCamelCase : Any = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): _lowerCamelCase : Optional[int] = _re_one_line_import_struct.search(A_ ).groups()[0] _lowerCamelCase : Optional[Any] = re.findall('''\[([^\]]+)\]''', A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _lowerCamelCase : Union[str, Any] = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: _lowerCamelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _lowerCamelCase : Dict = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_lowerCamelCase : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _lowerCamelCase : Optional[int] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _lowerCamelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _lowerCamelCase : int = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: _lowerCamelCase : Optional[Any] = _re_import_struct_add_many.search(A_ ).groups()[0].split(''', ''' ) _lowerCamelCase : int = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: _lowerCamelCase : Tuple = _re_between_brackets.search(A_ ).groups()[0].split(''', ''' ) _lowerCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _lowerCamelCase : List[str] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _lowerCamelCase : Dict = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _lowerCamelCase : int = lines[line_index] _lowerCamelCase : Dict = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _lowerCamelCase : Any = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. 
_lowerCamelCase : Any = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _lowerCamelCase : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _lowerCamelCase : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _lowerCamelCase : Optional[Any] = lines[line_index] _lowerCamelCase : Optional[int] = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _lowerCamelCase : List[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def snake_case_ ( A_ : Tuple, A_ : List[str] ): '''simple docstring''' def find_duplicates(A_ : List[str] ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _lowerCamelCase : Tuple = [] for key in import_dict_objects.keys(): _lowerCamelCase : Optional[Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _lowerCamelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _lowerCamelCase : Dict = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Optional[int] = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: _lowerCamelCase : Optional[Any] = os.path.join(A_, '''__init__.py''' ) _lowerCamelCase : Optional[Any] = parse_init(A_ ) if objects is not None: _lowerCamelCase : List[Any] = analyze_results(*A_ ) if len(A_ ) > 0: _lowerCamelCase : List[str] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(A_ ) ) if len(A_ ) > 0: raise ValueError('''\n\n'''.join(A_ ) ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob('''*.py''' ) ) ) == 0: continue _lowerCamelCase : Tuple = str((Path(A_ ) / folder).relative_to(A_ ) ) _lowerCamelCase : List[str] = short_path.replace(os.path.sep, '''.''' ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue _lowerCamelCase : Dict = str((Path(A_ ) / fname).relative_to(A_ ) ) _lowerCamelCase : Union[str, Any] = short_path.replace('''.py''', '''''' ).replace(os.path.sep, 
'''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(A_ ) return submodules lowerCAmelCase__ = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Any = importlib.util.spec_from_file_location( '''transformers''', os.path.join(A_, '''__init__.py''' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) _lowerCamelCase : Any = spec.loader.load_module() _lowerCamelCase : Any = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: _lowerCamelCase : Dict = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
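# Illustrative sketch of the backend-detection idea used by the init checker above: pull every
# is_xxx_available() name out of an "if not is_..._available()" line and join them with "_and_".
import re

_re_backend = re.compile(r"is_([a-z_]*)_available")

def find_backend(line: str) -> str | None:
    if re.match(r"^\s*if\s+not\s+is_[a-z_]*_available\(\)", line) is None:
        return None
    return "_and_".join(sorted(_re_backend.findall(line)))

print(find_backend("    if not is_torch_available() and not is_vision_available():"))  # torch_and_vision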
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
"""simple docstring""" from collections import defaultdict class __snake_case : def __init__( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : str = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 _lowerCamelCase : str = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(__lowerCAmelCase ) ) ] _lowerCamelCase : Tuple = defaultdict(__lowerCAmelCase ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 _lowerCamelCase : str = (1 << len(__lowerCAmelCase )) - 1 def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ): """simple docstring""" if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement _lowerCamelCase : Tuple = self.count_ways_until(__lowerCAmelCase , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. _lowerCamelCase : Tuple = total_ways_util return self.dp[mask][task_no] def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Tuple ): """simple docstring""" for i in range(len(__lowerCAmelCase ) ): for j in task_performed[i]: self.task[j].append(__lowerCAmelCase ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": lowerCAmelCase__ = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. lowerCAmelCase__ = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
"""simple docstring""" import argparse from collections import defaultdict import yaml lowerCAmelCase__ = '''docs/source/en/_toctree.yml''' def snake_case_ ( A_ : str ): '''simple docstring''' _lowerCamelCase : List[Any] = defaultdict(A_ ) _lowerCamelCase : List[Any] = [] _lowerCamelCase : Tuple = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} ) else: new_doc_list.append(A_ ) _lowerCamelCase : Optional[Any] = new_doc_list _lowerCamelCase : Tuple = [key for key, value in counts.items() if value > 1] _lowerCamelCase : List[str] = [] for duplicate_key in duplicates: _lowerCamelCase : Optional[int] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} ) if len(A_ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] ) _lowerCamelCase : Optional[int] = sorted(A_, key=lambda A_ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(A_ ) > 1: raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' ) overview_doc.extend(A_ ) # Sort return overview_doc def snake_case_ ( A_ : str=False ): '''simple docstring''' with open(A_, encoding='''utf-8''' ) as f: _lowerCamelCase : Union[str, Any] = yaml.safe_load(f.read() ) # Get to the API doc _lowerCamelCase : Union[str, Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCamelCase : List[Any] = content[api_idx]['''sections'''] # Then to the model doc _lowerCamelCase : str = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _lowerCamelCase : List[str] = api_doc[scheduler_idx]['''sections'''] _lowerCamelCase : Dict = clean_doc_toc(A_ ) _lowerCamelCase : Tuple = False if new_scheduler_doc != scheduler_doc: _lowerCamelCase : List[str] = True if overwrite: _lowerCamelCase : str = new_scheduler_doc if diff: if overwrite: _lowerCamelCase : List[Any] = api_doc with open(A_, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(A_, allow_unicode=A_ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) def snake_case_ ( A_ : Any=False ): '''simple docstring''' with open(A_, encoding='''utf-8''' ) as f: _lowerCamelCase : Dict = yaml.safe_load(f.read() ) # Get to the API doc _lowerCamelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCamelCase : Union[str, Any] = content[api_idx]['''sections'''] # Then to the model doc _lowerCamelCase : Union[str, Any] = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _lowerCamelCase : str = False _lowerCamelCase : Tuple = api_doc[pipeline_idx]['''sections'''] _lowerCamelCase : Any = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _lowerCamelCase : List[str] = pipeline_doc['''section'''] _lowerCamelCase : Optional[Any] = clean_doc_toc(A_ ) if overwrite: _lowerCamelCase : List[str] = new_sub_pipeline_doc new_pipeline_docs.append(A_ ) # sort overall pipeline doc _lowerCamelCase : Any = 
clean_doc_toc(A_ ) if new_pipeline_docs != pipeline_docs: _lowerCamelCase : Dict = True if overwrite: _lowerCamelCase : Optional[int] = new_pipeline_docs if diff: if overwrite: _lowerCamelCase : Union[str, Any] = api_doc with open(A_, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(A_, allow_unicode=A_ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCAmelCase__ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class __snake_case ( _lowercase): snake_case__ : str = "efficientnet" def __init__( self : Optional[Any] , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 6_0_0 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __lowerCAmelCase : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2_5_6_0 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.0_01 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) _lowerCamelCase : Dict = num_channels _lowerCamelCase : str = image_size _lowerCamelCase : str = width_coefficient _lowerCamelCase : Union[str, Any] = depth_coefficient _lowerCamelCase : Dict = depth_divisor _lowerCamelCase : List[Any] = kernel_sizes _lowerCamelCase : List[Any] = in_channels _lowerCamelCase : Any = out_channels _lowerCamelCase : List[Any] = depthwise_padding _lowerCamelCase : Any = strides _lowerCamelCase : Dict = num_block_repeats _lowerCamelCase : Tuple = expand_ratios _lowerCamelCase : Any = squeeze_expansion_ratio _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : List[Any] = hidden_dim _lowerCamelCase : Optional[int] = pooling_type _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Tuple = batch_norm_eps _lowerCamelCase : Tuple = batch_norm_momentum _lowerCamelCase : Any = dropout_rate _lowerCamelCase : Any = drop_connect_rate _lowerCamelCase : int = sum(__lowerCAmelCase ) * 4 class __snake_case ( _lowercase): snake_case__ : Optional[Any] = version.parse("1.11") @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return 1E-5
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" from collections import defaultdict def snake_case_ ( A_ : int ): '''simple docstring''' _lowerCamelCase : Dict = 1 _lowerCamelCase : List[Any] = True for v in tree[start]: if v not in visited: ret += dfs(A_ ) if ret % 2 == 0: cuts.append(A_ ) return ret def snake_case_ ( ): '''simple docstring''' dfs(1 ) if __name__ == "__main__": lowerCAmelCase__ , lowerCAmelCase__ = 10, 9 lowerCAmelCase__ = defaultdict(list) lowerCAmelCase__ = {} lowerCAmelCase__ = [] lowerCAmelCase__ = 0 lowerCAmelCase__ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
72
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
1
"""simple docstring""" def snake_case_ ( A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : Tuple = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): _lowerCamelCase : List[Any] = n - k # Calculate C(n,k) for i in range(A_ ): result *= n - i result //= i + 1 return result def snake_case_ ( A_ : int ): '''simple docstring''' return binomial_coefficient(2 * node_count, A_ ) // (node_count + 1) def snake_case_ ( A_ : int ): '''simple docstring''' if n < 0: raise ValueError('''factorial() not defined for negative values''' ) _lowerCamelCase : Dict = 1 for i in range(1, n + 1 ): result *= i return result def snake_case_ ( A_ : int ): '''simple docstring''' return catalan_number(A_ ) * factorial(A_ ) if __name__ == "__main__": lowerCAmelCase__ = int(input('''Enter the number of nodes: ''').strip() or 0) if node_count <= 0: raise ValueError('''We need some nodes to work with.''') print( F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """ F"""binary trees and {catalan_number(node_count)} binary search trees.""" )
72
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase 
: Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
1
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __snake_case ( _lowercase): def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCAmelCase , '''width_multiplier''' ) ) class __snake_case : def __init__( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=1_3 , __lowerCAmelCase : List[Any]=6_4 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : int="swish" , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : int=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=1_0 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=0.25 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : List[str]=0.0 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : Tuple = batch_size _lowerCamelCase : Optional[int] = image_size _lowerCamelCase : Union[str, Any] = patch_size _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : Union[str, Any] = make_divisible(5_1_2 * width_multiplier , divisor=8 ) _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : Dict = conv_kernel_size _lowerCamelCase : List[Any] = output_stride _lowerCamelCase : Any = classifier_dropout_prob _lowerCamelCase : Tuple = use_labels _lowerCamelCase : List[str] = is_training _lowerCamelCase : Optional[int] = num_labels _lowerCamelCase : str = initializer_range _lowerCamelCase : List[Any] = scope _lowerCamelCase : Any = width_multiplier _lowerCamelCase : int = ffn_dropout _lowerCamelCase : Union[str, Any] = attn_dropout def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Optional[int] = None _lowerCamelCase : Union[str, Any] = None if self.use_labels: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCamelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCamelCase : int = self.get_config() return config, pixel_values, labels, pixel_labels def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , 
ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = MobileViTVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : List[Any] = self.num_labels _lowerCamelCase : Optional[Any] = MobileViTVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Any = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : List[str] = self.num_labels _lowerCamelCase : List[str] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs _lowerCamelCase : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : List[str] = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) snake_case__ : Optional[int] = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) snake_case__ : int = False snake_case__ : List[str] = False snake_case__ : Tuple = False snake_case__ : int = False def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = MobileViTVaModelTester(self ) _lowerCamelCase : Union[str, Any] = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass 
@unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ): _lowerCamelCase : Tuple = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) _lowerCamelCase : List[Any] = outputs.hidden_states _lowerCamelCase : List[str] = 5 self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
_lowerCamelCase : Union[str, Any] = 2 for i in range(len(__lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Any = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : List[Any] = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = MobileViTVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( __lowerCAmelCase ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : Optional[Any] = prepare_img() _lowerCamelCase : Any = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCamelCase : Any = model(**__lowerCAmelCase ) # verify the logits _lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) _lowerCamelCase : int = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _lowerCamelCase : Any = model.to(__lowerCAmelCase ) _lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _lowerCamelCase : Dict = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = outputs.logits # verify the logits _lowerCamelCase : Dict = torch.Size((1, 2_1, 3_2, 3_2) ) 
self.assertEqual(logits.shape , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ] , device=__lowerCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _lowerCamelCase : Tuple = model.to(__lowerCAmelCase ) _lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCamelCase : Dict = model(**__lowerCAmelCase ) _lowerCamelCase : List[str] = outputs.logits.detach().cpu() _lowerCamelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(5_0, 6_0)] ) _lowerCamelCase : Dict = torch.Size((5_0, 6_0) ) self.assertEqual(segmentation[0].shape , __lowerCAmelCase ) _lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase ) _lowerCamelCase : str = torch.Size((3_2, 3_2) ) self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
72
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
1
"""simple docstring""" import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __snake_case ( _lowercase): snake_case__ : Optional[Any] = (CMStochasticIterativeScheduler,) snake_case__ : Tuple = 1_0 def SCREAMING_SNAKE_CASE ( self : str , **__lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : List[str] = { '''num_train_timesteps''': 2_0_1, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } config.update(**__lowerCAmelCase ) return config def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = 1_0 _lowerCamelCase : List[str] = self.get_scheduler_config() _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = scheduler.timesteps[0] _lowerCamelCase : str = scheduler.timesteps[1] _lowerCamelCase : List[Any] = self.dummy_sample _lowerCamelCase : str = 0.1 * sample _lowerCamelCase : str = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample _lowerCamelCase : Tuple = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : str = 1 scheduler.set_timesteps(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Tuple = torch.manual_seed(0 ) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(__lowerCAmelCase ): # 1. scale model input _lowerCamelCase : Dict = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # 2. predict noise residual _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , __lowerCAmelCase ) # 3. predict previous sample x_t-1 _lowerCamelCase : Any = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample _lowerCamelCase : List[str] = pred_prev_sample _lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : str = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.scheduler_classes[0] _lowerCamelCase : List[str] = self.get_scheduler_config() _lowerCamelCase : str = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : int = [1_0_6, 0] scheduler.set_timesteps(timesteps=__lowerCAmelCase ) _lowerCamelCase : str = scheduler.timesteps _lowerCamelCase : str = torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. 
scale model input _lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # 2. predict noise residual _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , __lowerCAmelCase ) # 3. predict previous sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample _lowerCamelCase : List[str] = pred_prev_sample _lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : Tuple = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = [3_9, 3_0, 1_2, 1_5, 0] with self.assertRaises(__lowerCAmelCase , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Any = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = [3_9, 3_0, 1_2, 1, 0] _lowerCamelCase : Optional[Any] = len(__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Dict = self.get_scheduler_config() _lowerCamelCase : Dict = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : Dict = [scheduler.config.num_train_timesteps] with self.assertRaises( __lowerCAmelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=__lowerCAmelCase )
72
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase__ = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, } lowerCAmelCase__ = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } lowerCAmelCase__ = '''▁''' class __snake_case ( _lowercase): snake_case__ : int = VOCAB_FILES_NAMES snake_case__ : Any = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[int]="<s>" , __lowerCAmelCase : Optional[int]="<unk>" , __lowerCAmelCase : str="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Optional[int] , ): """simple docstring""" _lowerCamelCase : Dict = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _lowerCamelCase : Dict = vocab_file _lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} _lowerCamelCase : int = len(self.sp_model ) - 1 _lowerCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCamelCase : List[str] = [self.cls_token_id] _lowerCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[Any] = [self.sep_token_id] _lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : int = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str ): """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[str] ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowerCamelCase : List[Any] = self.sp_model.PieceToId(__lowerCAmelCase ) return spm_id if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Dict ): """simple docstring""" _lowerCamelCase : Tuple = [] _lowerCamelCase : Tuple = '''''' _lowerCamelCase : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCAmelCase ) + token _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : List[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) _lowerCamelCase : List[str] = False out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def __getstate__( self : List[str] ): """simple docstring""" _lowerCamelCase : Dict = self.__dict__.copy() _lowerCamelCase : int = None return state def __setstate__( self : int , __lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _lowerCamelCase : Dict = {} _lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCamelCase : Any = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _lowerCamelCase : Tuple = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
72
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , 
len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : 
Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : 
List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) _lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] 
, __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, 
-4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
1
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class __snake_case ( _lowercase): def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[int]=1 ): """simple docstring""" _lowerCamelCase : str = tokenizer _lowerCamelCase : List[str] = dataset _lowerCamelCase : List[Any] = len(__lowerCAmelCase ) if n_tasks is None else n_tasks _lowerCamelCase : Dict = n_copies def __iter__( self : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) _lowerCamelCase : str = self.tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __snake_case ( _lowercase): def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = start_length _lowerCamelCase : List[Any] = eof_strings _lowerCamelCase : Union[str, Any] = tokenizer def __call__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , **__lowerCAmelCase : Tuple ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) _lowerCamelCase : List[str] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCAmelCase ) def snake_case_ ( A_ : List[Any] ): '''simple docstring''' _lowerCamelCase : int = re.split('''(%s)''' % '''|'''.join(A_ ), A_ ) # last string should be "" return "".join(string_list[:-2] ) def snake_case_ ( A_ : List[Any], A_ : str, A_ : Optional[Any], A_ : Optional[int], A_ : List[str], A_ : List[Any]=20, **A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = defaultdict(A_ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(A_ ) ): with torch.no_grad(): _lowerCamelCase : List[str] = batch['''ids'''].shape[-1] _lowerCamelCase : List[Any] = accelerator.unwrap_model(A_ ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=A_, **A_ ) # each task is generated batch_size times _lowerCamelCase : int = batch['''task_id'''].repeat(A_ ) _lowerCamelCase : str = accelerator.pad_across_processes( A_, dim=1, pad_index=tokenizer.pad_token_id ) _lowerCamelCase , _lowerCamelCase : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) _lowerCamelCase : str = generated_tokens.cpu().numpy() _lowerCamelCase : Optional[Any] = generated_tasks.cpu().numpy() for task, generated_tokens in 
zip(A_, A_ ): gen_token_dict[task].append(A_ ) _lowerCamelCase : List[str] = [[] for _ in range(A_ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: _lowerCamelCase : List[Any] = tokenizer.decode(A_, skip_special_tokens=A_, clean_up_tokenization_spaces=A_ ) code_gens[task].append(remove_last_block(A_ ) ) return code_gens def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : int = HfArgumentParser(A_ ) _lowerCamelCase : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric _lowerCamelCase : str = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing _lowerCamelCase : Any = '''false''' if args.num_workers is None: _lowerCamelCase : Dict = multiprocessing.cpu_count() # Use dataset load to feed to accelerate _lowerCamelCase : Union[str, Any] = Accelerator() set_seed(args.seed, device_specific=A_ ) # Load model and tokenizer _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt ) _lowerCamelCase : Optional[int] = tokenizer.eos_token _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings _lowerCamelCase : Union[str, Any] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, A_, A_ )] ), } # Load evaluation dataset and metric _lowerCamelCase : Optional[int] = load_dataset('''openai_humaneval''' ) _lowerCamelCase : Optional[Any] = load_metric('''code_eval''' ) _lowerCamelCase : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) _lowerCamelCase : int = args.n_samples // args.batch_size _lowerCamelCase : int = TokenizedDataset(A_, human_eval['''test'''], n_copies=A_, n_tasks=A_ ) # do not confuse args.batch_size, which is actually the num_return_sequences _lowerCamelCase : Optional[int] = DataLoader(A_, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: _lowerCamelCase : List[str] = code_eval_metric.compute(references=[''''''], predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception _lowerCamelCase , _lowerCamelCase : List[str] = accelerator.prepare(A_, A_ ) _lowerCamelCase : Tuple = complete_code( A_, A_, A_, A_, n_tasks=A_, batch_size=args.batch_size, **A_, ) if accelerator.is_main_process: _lowerCamelCase : List[str] = [] for task in tqdm(range(A_ ) ): _lowerCamelCase : Dict = human_eval['''test'''][task]['''test'''] _lowerCamelCase : Optional[Any] = F'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric _lowerCamelCase , _lowerCamelCase : str = code_eval_metric.compute( references=A_, predictions=A_, num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file, '''w''' ) as fp: json.dump(A_, A_ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
72
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
1
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] lowerCAmelCase__ = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def snake_case_ ( A_ : List[str] ): '''simple docstring''' _lowerCamelCase : Dict = torch.load(A_, map_location='''cpu''' ) return sd def snake_case_ ( A_ : Union[str, Any], A_ : Optional[Any], A_ : List[Any]=rename_keys_prefix ): '''simple docstring''' _lowerCamelCase : Optional[Any] = OrderedDict() _lowerCamelCase : Optional[int] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _lowerCamelCase : Optional[Any] = key for name_pair in rename_keys_prefix: _lowerCamelCase : Optional[Any] = new_key.replace(name_pair[0], name_pair[1] ) _lowerCamelCase : Any = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _lowerCamelCase : List[Any] = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def snake_case_ ( A_ : int, A_ : str ): '''simple docstring''' assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _lowerCamelCase : List[str] = '''pretraining''' if "vcr" in checkpoint_path: _lowerCamelCase : List[str] = {'''visual_embedding_dim''': 5_12} elif "vqa_advanced" in checkpoint_path: _lowerCamelCase : str = {'''visual_embedding_dim''': 20_48} elif "vqa" in checkpoint_path: _lowerCamelCase : Union[str, Any] = {'''visual_embedding_dim''': 20_48} elif "nlvr" in checkpoint_path: _lowerCamelCase : Union[str, Any] = {'''visual_embedding_dim''': 10_24} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: _lowerCamelCase : Dict = {'''visual_embedding_dim''': 5_12} _lowerCamelCase : Tuple = '''multichoice''' elif "vqa_advanced" in checkpoint_path: _lowerCamelCase : Dict = {'''visual_embedding_dim''': 20_48} _lowerCamelCase : Dict = '''vqa_advanced''' elif "vqa" in checkpoint_path: _lowerCamelCase : Dict = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29} _lowerCamelCase : Dict = '''vqa''' elif "nlvr" in checkpoint_path: _lowerCamelCase : Union[str, Any] = { '''visual_embedding_dim''': 10_24, '''num_labels''': 2, } _lowerCamelCase : Optional[int] = '''nlvr''' _lowerCamelCase : Any = VisualBertConfig(**A_ ) # Load State Dict _lowerCamelCase : Tuple = load_state_dict(A_ ) _lowerCamelCase : Union[str, Any] = get_new_dict(A_, A_ ) if model_type == "pretraining": _lowerCamelCase : Union[str, Any] = 
VisualBertForPreTraining(A_ ) elif model_type == "vqa": _lowerCamelCase : Union[str, Any] = VisualBertForQuestionAnswering(A_ ) elif model_type == "nlvr": _lowerCamelCase : Any = VisualBertForVisualReasoning(A_ ) elif model_type == "multichoice": _lowerCamelCase : Dict = VisualBertForMultipleChoice(A_ ) model.load_state_dict(A_ ) # Save Checkpoints Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') lowerCAmelCase__ = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
72
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
1
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : list[int], A_ : list[int], A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = list(range(len(A_ ) ) ) _lowerCamelCase : int = [v / w for v, w in zip(A_, A_ )] index.sort(key=lambda A_ : ratio[i], reverse=A_ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(A_ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : List[str] = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Union[str, Any] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
72
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase 
: Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return 
super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
72
1
"""simple docstring""" from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( _lowercase , _lowercase): @register_to_config def __init__( self : int , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None ): """simple docstring""" super().__init__() _lowerCamelCase : Tuple = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _lowerCamelCase : Dict = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ) else: _lowerCamelCase : Dict = None _lowerCamelCase : List[Any] = torch.nn.Parameter(__lowerCAmelCase ) class __snake_case ( _lowercase): snake_case__ : VQModel snake_case__ : CLIPTextModel snake_case__ : CLIPTokenizer snake_case__ : TransformeraDModel snake_case__ : LearnedClassifierFreeSamplingEmbeddings snake_case__ : VQDiffusionScheduler def __init__( self : List[str] , __lowerCAmelCase : VQModel , __lowerCAmelCase : CLIPTextModel , __lowerCAmelCase : CLIPTokenizer , __lowerCAmelCase : TransformeraDModel , __lowerCAmelCase : VQDiffusionScheduler , __lowerCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): """simple docstring""" super().__init__() self.register_modules( vqvae=__lowerCAmelCase , transformer=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , scheduler=__lowerCAmelCase , learned_classifier_free_sampling_embeddings=__lowerCAmelCase , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): """simple docstring""" _lowerCamelCase : Any = len(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else 1 # get prompt text embeddings _lowerCamelCase : Optional[int] = self.tokenizer( __lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) _lowerCamelCase : Dict = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _lowerCamelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _lowerCamelCase : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length] _lowerCamelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _lowerCamelCase : Tuple = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__lowerCAmelCase ) # duplicate text embeddings for each generation per prompt _lowerCamelCase : Any = prompt_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _lowerCamelCase : str = self.learned_classifier_free_sampling_embeddings.embeddings _lowerCamelCase : Any = negative_prompt_embeds.unsqueeze(0 ).repeat(__lowerCAmelCase , 1 , 1 ) else: _lowerCamelCase : str = [''''''] * batch_size _lowerCamelCase : Dict = text_input_ids.shape[-1] _lowerCamelCase : Tuple = self.tokenizer( __lowerCAmelCase , padding='''max_length''' , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' , ) _lowerCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _lowerCamelCase : str = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__lowerCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _lowerCamelCase : Any = negative_prompt_embeds.shape[1] _lowerCamelCase : Optional[int] = negative_prompt_embeds.repeat(1 , __lowerCAmelCase , 1 ) _lowerCamelCase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCamelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __lowerCAmelCase : Union[str, List[str]] , __lowerCAmelCase : int = 1_0_0 , __lowerCAmelCase : float = 5.0 , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , ): """simple docstring""" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _lowerCamelCase : str = 1 elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _lowerCamelCase : str = len(__lowerCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}''' ) _lowerCamelCase : List[str] = batch_size * num_images_per_prompt _lowerCamelCase : Optional[Any] = guidance_scale > 1.0 _lowerCamelCase : Tuple = self._encode_prompt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__lowerCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _lowerCamelCase : int = (batch_size, self.transformer.num_latent_pixels) if latents is None: _lowerCamelCase : int = self.transformer.num_vector_embeds - 1 _lowerCamelCase : Optional[int] = torch.full(__lowerCAmelCase , __lowerCAmelCase ).to(self.device ) else: if 
latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,''' f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _lowerCamelCase : Any = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__lowerCAmelCase , device=self.device ) _lowerCamelCase : List[str] = self.scheduler.timesteps.to(self.device ) _lowerCamelCase : int = latents for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the sample if we are doing classifier free guidance _lowerCamelCase : str = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _lowerCamelCase : Optional[int] = self.transformer(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase ).sample if do_classifier_free_guidance: _lowerCamelCase , _lowerCamelCase : Any = model_output.chunk(2 ) _lowerCamelCase : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__lowerCAmelCase , dim=1 , keepdim=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.truncate(__lowerCAmelCase , __lowerCAmelCase ) # remove `log(0)`'s (`-inf`s) _lowerCamelCase : int = model_output.clamp(-7_0 ) # compute the previous noisy sample x_t -> x_t-1 _lowerCamelCase : Optional[int] = self.scheduler.step(__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Tuple = self.vqvae.config.vq_embed_dim _lowerCamelCase : List[str] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _lowerCamelCase : List[Any] = self.vqvae.quantize.get_codebook_entry(__lowerCAmelCase , shape=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = self.vqvae.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase ).sample _lowerCamelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCamelCase : List[str] = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : float ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = torch.sort(__lowerCAmelCase , 1 , descending=__lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.exp(__lowerCAmelCase ) _lowerCamelCase : Tuple = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _lowerCamelCase : Any = torch.full_like(keep_mask[:, 0:1, :] , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = torch.cat((all_true, keep_mask) , dim=1 ) _lowerCamelCase : Tuple = keep_mask[:, :-1, :] _lowerCamelCase : List[str] = keep_mask.gather(1 , indices.argsort(1 ) ) _lowerCamelCase : Union[str, Any] = log_p_x_0.clone() _lowerCamelCase : Optional[Any] = -torch.inf # -inf = log(0) return rv
72
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
1
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging lowerCAmelCase__ = logging.get_logger(__name__) def snake_case_ ( A_ : List[Any]=None, A_ : Any=None ): '''simple docstring''' return field(default_factory=lambda: default, metadata=A_ ) @dataclass class __snake_case : snake_case__ : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) snake_case__ : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}) snake_case__ : List[int] = list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Use FP16 to accelerate inference."}) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Benchmark training of model"}) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Verbose memory tracing"}) snake_case__ : bool = field( default=_lowercase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) snake_case__ : bool = field( default=_lowercase , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Trace memory line by line"}) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Save result to a CSV file"}) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Save all print statements in a log file"}) snake_case__ : bool = field(default=_lowercase , metadata={"help": "Whether to print environment information"}) snake_case__ : bool = field( default=_lowercase , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) snake_case__ : str = field( default=f"""inference_time_{round(time())}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , ) snake_case__ : str = field( default=f"""inference_memory_{round(time())}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) snake_case__ : str = field( default=f"""train_time_{round(time())}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) snake_case__ : str = field( default=f"""train_memory_{round(time())}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) snake_case__ : str = field( default=f"""env_info_{round(time())}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , ) snake_case__ : str = field( default=f"""log_{round(time())}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , ) snake_case__ : int = field(default=3 , metadata={"help": "Times an experiment will be run."}) snake_case__ : bool = field( default=_lowercase , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" warnings.warn( f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , __lowerCAmelCase , ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def snake_case_ ( A_ : Optional[Any], A_ : Union[str, Any]="shi-labs/oneformer_demo" ): '''simple docstring''' with open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) as f: _lowerCamelCase : int = json.load(A_ ) _lowerCamelCase : Union[str, Any] = {} _lowerCamelCase : Dict = [] _lowerCamelCase : List[Any] = [] for key, info in class_info.items(): _lowerCamelCase : Optional[int] = info['''name'''] class_names.append(info['''name'''] ) if info["isthing"]: thing_ids.append(int(A_ ) ) _lowerCamelCase : Union[str, Any] = thing_ids _lowerCamelCase : List[Any] = class_names return metadata class __snake_case ( unittest.TestCase): def __init__( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=7 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Union[str, Any]=3_0 , __lowerCAmelCase : str=4_0_0 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=1_0 , __lowerCAmelCase : int=False , __lowerCAmelCase : int=2_5_5 , __lowerCAmelCase : List[str]="shi-labs/oneformer_demo" , __lowerCAmelCase : str="ade20k_panoptic.json" , __lowerCAmelCase : List[Any]=1_0 , ): """simple docstring""" _lowerCamelCase : Optional[int] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : Union[str, Any] = min_resolution _lowerCamelCase : str = max_resolution _lowerCamelCase : Optional[Any] = do_resize _lowerCamelCase : int = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Any = image_mean _lowerCamelCase : List[str] = image_std _lowerCamelCase : List[str] = class_info_file _lowerCamelCase : Optional[Any] = prepare_metadata(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : str = num_text _lowerCamelCase : Any = repo_path # for the post_process_functions _lowerCamelCase : Union[str, Any] = 2 _lowerCamelCase : str = 1_0 _lowerCamelCase : Tuple = 1_0 _lowerCamelCase : List[str] = 3 _lowerCamelCase : Any = 4 _lowerCamelCase : Tuple = num_labels _lowerCamelCase : Tuple = do_reduce_labels _lowerCamelCase : List[str] = ignore_index def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int]=False ): """simple 
docstring""" if not batched: _lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _lowerCamelCase , _lowerCamelCase : List[Any] = image.size else: _lowerCamelCase , _lowerCamelCase : int = image.shape[1], image.shape[2] if w < h: _lowerCamelCase : str = int(self.size['''shortest_edge'''] * h / w ) _lowerCamelCase : Any = self.size['''shortest_edge'''] elif w > h: _lowerCamelCase : Optional[int] = self.size['''shortest_edge'''] _lowerCamelCase : Tuple = int(self.size['''shortest_edge'''] * w / h ) else: _lowerCamelCase : Optional[Any] = self.size['''shortest_edge'''] _lowerCamelCase : Optional[int] = self.size['''shortest_edge'''] else: _lowerCamelCase : List[str] = [] for image in image_inputs: _lowerCamelCase , _lowerCamelCase : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _lowerCamelCase : List[Any] = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _lowerCamelCase : List[Any] = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string snake_case__ : Any = image_processing_class def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''ignore_index''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''class_info_file''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''num_text''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''repo_path''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''metadata''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_reduce_labels''' ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _lowerCamelCase : Tuple = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values _lowerCamelCase , _lowerCamelCase : str = 
self.image_processing_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase , _lowerCamelCase : Tuple = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = image_processor( __lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values _lowerCamelCase , _lowerCamelCase : Dict = self.image_processing_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase , _lowerCamelCase : Any = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _lowerCamelCase : str = image_processor( __lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values _lowerCamelCase , _lowerCamelCase : List[Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase , _lowerCamelCase : List[Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _lowerCamelCase : List[str] = image_processor( __lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Dict="np" ): """simple docstring""" _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # prepare image and target _lowerCamelCase : int = 
self.image_processing_tester.num_labels _lowerCamelCase : Optional[Any] = None _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase ) if with_segmentation_maps: _lowerCamelCase : Optional[int] = num_labels if is_instance_map: _lowerCamelCase : List[Any] = list(range(__lowerCAmelCase ) ) * 2 _lowerCamelCase : int = dict(enumerate(__lowerCAmelCase ) ) _lowerCamelCase : List[str] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": _lowerCamelCase : List[str] = [Image.fromarray(__lowerCAmelCase ) for annotation in annotations] _lowerCamelCase : Tuple = image_processor( __lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , __lowerCAmelCase , return_tensors='''pt''' , instance_id_to_semantic_id=__lowerCAmelCase , pad_and_return_pixel_mask=__lowerCAmelCase , ) return inputs def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" def common(__lowerCAmelCase : Dict=False , __lowerCAmelCase : Any=None ): _lowerCamelCase : Optional[Any] = self.comm_get_image_processor_inputs( with_segmentation_maps=__lowerCAmelCase , is_instance_map=__lowerCAmelCase , segmentation_type=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = inputs['''mask_labels'''] _lowerCamelCase : Union[str, Any] = inputs['''class_labels'''] _lowerCamelCase : int = inputs['''pixel_values'''] _lowerCamelCase : Optional[int] = inputs['''text_inputs'''] # check the batch_size for mask_label, class_label, text_input in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(__lowerCAmelCase ) , self.image_processing_tester.num_text ) common() common(is_instance_map=__lowerCAmelCase ) common(is_instance_map=__lowerCAmelCase , segmentation_type='''pil''' ) common(is_instance_map=__lowerCAmelCase , segmentation_type='''pil''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = np.zeros((2_0, 5_0) ) _lowerCamelCase : Dict = 1 _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = binary_mask_to_rle(__lowerCAmelCase ) self.assertEqual(len(__lowerCAmelCase ) , 4 ) self.assertEqual(rle[0] , 2_1 ) self.assertEqual(rle[1] , 4_5 ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) _lowerCamelCase : str = self.image_processing_tester.get_fake_oneformer_outputs() _lowerCamelCase : int = fature_extractor.post_process_semantic_segmentation(__lowerCAmelCase ) self.assertEqual(len(__lowerCAmelCase ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) _lowerCamelCase : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] _lowerCamelCase : int = fature_extractor.post_process_semantic_segmentation(__lowerCAmelCase , target_sizes=__lowerCAmelCase 
) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) _lowerCamelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs() _lowerCamelCase : Optional[int] = image_processor.post_process_instance_segmentation(__lowerCAmelCase , threshold=0 ) self.assertTrue(len(__lowerCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ) , __lowerCAmelCase ) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Any = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) _lowerCamelCase : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs() _lowerCamelCase : Optional[Any] = image_processor.post_process_panoptic_segmentation(__lowerCAmelCase , threshold=0 ) self.assertTrue(len(__lowerCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ) , __lowerCAmelCase ) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
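# Editor's illustrative sketch (an assumption, not taken from the test above): a minimal
# run-length encoding of a flattened binary mask, the operation exercised by the
# binary_mask_to_rle test. It is not claimed to be byte-identical to the library helper;
# it only shows the idea of alternating run lengths, starting with the leading zeros.
def binary_mask_to_rle_sketch(mask_row):
    rle = []
    current_value = 0  # runs are counted starting from value 0
    run_length = 0
    for pixel in mask_row:
        if pixel == current_value:
            run_length += 1
        else:
            rle.append(run_length)
            current_value = pixel
            run_length = 1
    rle.append(run_length)
    return rle

# Example: three 1s starting at index 2 in a row of eight pixels -> [2, 3, 3]
assert binary_mask_to_rle_sketch([0, 0, 1, 1, 1, 0, 0, 0]) == [2, 3, 3]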
72
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
1
"""simple docstring""" from __future__ import annotations from random import random class __snake_case : def __init__( self : Dict , __lowerCAmelCase : int | None = None ): """simple docstring""" _lowerCamelCase : List[Any] = value _lowerCamelCase : Any = random() _lowerCamelCase : Node | None = None _lowerCamelCase : Node | None = None def __repr__( self : Any ): """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return f'''\'{self.value}: {self.prior:.5}\'''' else: return pformat( {f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 ) def __str__( self : Tuple ): """simple docstring""" _lowerCamelCase : Union[str, Any] = str(self.value ) + ''' ''' _lowerCamelCase : Any = str(self.left or '''''' ) _lowerCamelCase : Any = str(self.right or '''''' ) return value + left + right def snake_case_ ( A_ : Node | None, A_ : int ): '''simple docstring''' if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: _lowerCamelCase , _lowerCamelCase : str = split(root.left, A_ ) return left, root else: _lowerCamelCase , _lowerCamelCase : Dict = split(root.right, A_ ) return root, right def snake_case_ ( A_ : Node | None, A_ : Node | None ): '''simple docstring''' if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: _lowerCamelCase : str = merge(left.right, A_ ) return left else: _lowerCamelCase : str = merge(A_, right.left ) return right def snake_case_ ( A_ : Node | None, A_ : int ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Node(A_ ) _lowerCamelCase , _lowerCamelCase : List[Any] = split(A_, A_ ) return merge(merge(A_, A_ ), A_ ) def snake_case_ ( A_ : Node | None, A_ : int ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Any = split(A_, value - 1 ) _lowerCamelCase , _lowerCamelCase : Any = split(A_, A_ ) return merge(A_, A_ ) def snake_case_ ( A_ : Node | None ): '''simple docstring''' if not root: # None return else: inorder(root.left ) print(root.value, end=''',''' ) inorder(root.right ) def snake_case_ ( A_ : Node | None, A_ : str ): '''simple docstring''' for arg in args.split(): if arg[0] == "+": _lowerCamelCase : Any = insert(A_, int(arg[1:] ) ) elif arg[0] == "-": _lowerCamelCase : Optional[Any] = erase(A_, int(arg[1:] ) ) else: print('''Unknown command''' ) return root def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = None print( '''enter numbers to create a tree, + value to add value into treap, ''' '''- value to erase all nodes with value. \'q\' to quit. ''' ) _lowerCamelCase : Optional[int] = input() while args != "q": _lowerCamelCase : List[str] = interact_treap(A_, A_ ) print(A_ ) _lowerCamelCase : Optional[Any] = input() print('''good by!''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
72
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = 
do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
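# Editor's illustrative note (the variable name below is an assumption; the config above
# only shows the functools.reduce expression): the reduced product of conv_stride is the
# overall temporal downsampling of the convolutional feature extractor, i.e. how many raw
# audio samples map onto one output frame.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default strides from the config above
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
assert inputs_to_logits_ratio == 320  # one output frame per 320 input samples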
72
1
"""simple docstring""" import numpy as np from PIL import Image def snake_case_ ( A_ : np.ndarray, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[Any] = np.array(A_ ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) _lowerCamelCase : Any = 0 _lowerCamelCase : int = 0 _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Any = 0 # compute the shape of the output matrix _lowerCamelCase : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape _lowerCamelCase : List[Any] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix _lowerCamelCase : int = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 _lowerCamelCase : Dict = 0 _lowerCamelCase : Any = 0 return updated_arr def snake_case_ ( A_ : np.ndarray, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[Any] = np.array(A_ ) if arr.shape[0] != arr.shape[1]: raise ValueError('''The input array is not a square matrix''' ) _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Dict = 0 _lowerCamelCase : str = 0 _lowerCamelCase : str = 0 # compute the shape of the output matrix _lowerCamelCase : List[str] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape _lowerCamelCase : List[Any] = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix _lowerCamelCase : Dict = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Optional[int] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image lowerCAmelCase__ = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
72
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
72
1
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=9_9 , __lowerCAmelCase : str=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Optional[Any]=3_7 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Union[str, Any]=1_6 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Dict=None , ): """simple docstring""" _lowerCamelCase : Tuple = parent _lowerCamelCase : int = batch_size _lowerCamelCase : List[Any] = seq_length _lowerCamelCase : Tuple = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Dict = use_token_type_ids _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : Dict = vocab_size _lowerCamelCase : List[str] = hidden_size _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[int] = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : List[Any] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : Dict = type_sequence_label_size _lowerCamelCase : str = initializer_range _lowerCamelCase : int = num_labels _lowerCamelCase : str = num_choices _lowerCamelCase : str = scope def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Union[str, Any] = None if self.use_input_mask: _lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[int] = None if self.use_token_type_ids: _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCamelCase : Optional[int] = None _lowerCamelCase : int = None _lowerCamelCase : Optional[int] = None if self.use_labels: _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Dict = NystromformerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _lowerCamelCase : Any = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _lowerCamelCase : str = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ): """simple docstring""" _lowerCamelCase : int = NystromformerForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = NystromformerForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : int = self.num_labels _lowerCamelCase : Union[str, Any] = NystromformerForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase 
: Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : str = self.num_labels _lowerCamelCase : int = NystromformerForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = self.num_choices _lowerCamelCase : str = NystromformerForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : int = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : List[Any] = config_and_inputs _lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : List[str] = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Optional[Any] = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Optional[int] = False snake_case__ : int = False def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = NystromformerModelTester(self ) _lowerCamelCase : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", 
"relative_key_query"]: _lowerCamelCase : str = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = NystromformerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Tuple = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) _lowerCamelCase : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _lowerCamelCase : Dict = model(__lowerCAmelCase )[0] _lowerCamelCase : str = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , __lowerCAmelCase ) _lowerCamelCase : int = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Dict = '''the [MASK] of Belgium is Brussels''' _lowerCamelCase : str = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) _lowerCamelCase : Any = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) _lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) with torch.no_grad(): _lowerCamelCase : List[Any] = model(encoding.input_ids ).logits _lowerCamelCase : str = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , '''capital''' )
72
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
1
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase__ = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize lowerCAmelCase__ = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' lowerCAmelCase__ = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' lowerCAmelCase__ = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. 
Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __snake_case ( datasets.Metric): def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] , ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ): """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str=0.9 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Union[str, Any]=0.5 ): """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): _lowerCamelCase : List[Any] = [ meteor_score.single_meteor_score( word_tokenize(__lowerCAmelCase ) , word_tokenize(__lowerCAmelCase ) , alpha=__lowerCAmelCase , beta=__lowerCAmelCase , gamma=__lowerCAmelCase ) for ref, pred in zip(__lowerCAmelCase , __lowerCAmelCase ) ] else: _lowerCamelCase : Optional[int] = [ meteor_score.single_meteor_score(__lowerCAmelCase , __lowerCAmelCase , alpha=__lowerCAmelCase , beta=__lowerCAmelCase , gamma=__lowerCAmelCase ) for ref, pred in zip(__lowerCAmelCase , __lowerCAmelCase ) ] return {"meteor": np.mean(__lowerCAmelCase )}
72
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __snake_case ( unittest.TestCase): snake_case__ : Union[str, Any] = MODEL_FOR_CAUSAL_LM_MAPPING snake_case__ : List[str] = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output _lowerCamelCase : str = text_generator('''This is a test''' , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) _lowerCamelCase : Union[str, Any] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __lowerCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) _lowerCamelCase : Tuple = text_generator('''This is a test''' , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ {'''generated_token_ids''': ANY(__lowerCAmelCase )}, {'''generated_token_ids''': ANY(__lowerCAmelCase )}, ] , ) _lowerCamelCase : str = text_generator.model.config.eos_token_id _lowerCamelCase : Tuple = '''<pad>''' _lowerCamelCase : str = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , ) self.assertEqual( __lowerCAmelCase , [ [ {'''generated_token_ids''': ANY(__lowerCAmelCase )}, {'''generated_token_ids''': ANY(__lowerCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__lowerCAmelCase )}, {'''generated_token_ids''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : str = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output _lowerCamelCase : List[str] = text_generator('''This is a test''' , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) _lowerCamelCase : str = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : 
str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = TextGenerationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) return text_generator, ["This is a test", "Another test"] def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Union[str, Any] = '''Hello I believe in''' _lowerCamelCase : int = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = text_generator(__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) _lowerCamelCase : Dict = text_generator(__lowerCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : Tuple = text_generator.model _lowerCamelCase : Optional[int] = text_generator.tokenizer _lowerCamelCase : int = text_generator('''This is a test''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) _lowerCamelCase : Optional[Any] = text_generator('''This is a test''' , return_full_text=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) _lowerCamelCase : Optional[int] = pipeline(task='''text-generation''' , model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , return_full_text=__lowerCAmelCase ) _lowerCamelCase : str = text_generator('''This is a test''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) _lowerCamelCase : Tuple = text_generator('''This is a test''' , return_full_text=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) _lowerCamelCase : Union[str, Any] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: _lowerCamelCase : Any = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], ] , ) with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Dict = text_generator('''test''' , return_full_text=__lowerCAmelCase , return_text=__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : List[str] = text_generator('''test''' , return_full_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : str = text_generator('''test''' , 
return_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): _lowerCamelCase : Any = text_generator('''''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): _lowerCamelCase : str = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. _lowerCamelCase : Union[str, Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0_0_0_0 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_0_0 , max_new_tokens=2_0 ) _lowerCamelCase : str = text_generator('''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=2_0 ) # Hole strategy cannot work with self.assertRaises(__lowerCAmelCase ): text_generator( '''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 1_0 , ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" import torch # Classic `model_kwargs` _lowerCamelCase : str = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) _lowerCamelCase : Dict = pipe('''This is a test''' ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
_lowerCamelCase : Union[str, Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) _lowerCamelCase : str = pipe('''This is a test''' ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 _lowerCamelCase : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) _lowerCamelCase : Dict = pipe('''This is a test''' ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" import torch _lowerCamelCase : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" import torch _lowerCamelCase : Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__lowerCAmelCase , top_p=0.5 ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = '''Hello world''' _lowerCamelCase : int = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": _lowerCamelCase : int = logging.get_logger('''transformers.generation.tf_utils''' ) else: _lowerCamelCase : str = logging.get_logger('''transformers.generation.utils''' ) _lowerCamelCase : Tuple = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__lowerCAmelCase ) as cl: _lowerCamelCase : int = text_generator(__lowerCAmelCase , max_length=1_0 , max_new_tokens=1 ) self.assertIn(__lowerCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__lowerCAmelCase ) as cl: _lowerCamelCase : Dict = text_generator(__lowerCAmelCase , max_new_tokens=1 ) self.assertNotIn(__lowerCAmelCase , cl.out ) with CaptureLogger(__lowerCAmelCase ) as cl: _lowerCamelCase : List[str] = text_generator(__lowerCAmelCase , max_length=1_0 ) self.assertNotIn(__lowerCAmelCase , cl.out )
72
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def snake_case_ ( A_ : str, A_ : float | Decimal, A_ : float = 10**-10 ): '''simple docstring''' _lowerCamelCase : Optional[Any] = a while True: _lowerCamelCase : Union[str, Any] = Decimal(A_ ) - ( Decimal(eval(A_ ) ) / Decimal(eval(str(diff(A_ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(A_ ) ) < precision: # noqa: S307 return float(A_ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""") # Find Square Root of 5 print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
72
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''CLIPFeatureExtractor'''] lowerCAmelCase__ = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
1
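Editor's sketch: the elimination-based solver in the row above can be cross-checked with NumPy. The coefficients below are the same five equations from the sample's __main__ block; the printed solution is derived here, not quoted from the sample.

import numpy as np

equations = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in equations], dtype=float)  # coefficient matrix
b = np.array([row[-1] for row in equations], dtype=float)   # right-hand side
print(np.linalg.solve(a, b))  # [-1.  0.  1.  2.  3.]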
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Any = KandinskyVaaPipeline snake_case__ : List[str] = [ "image_embeds", "negative_image_embeds", ] snake_case__ : Optional[int] = ["image_embeds", "negative_image_embeds"] snake_case__ : Any = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] snake_case__ : Union[str, Any] = False @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return 3_2 @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return 3_2 @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return self.time_input_dim @property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return 1_0_0 @property def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : Any = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } _lowerCamelCase : Tuple = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : Any = VQModel(**self.dummy_movq_kwargs ) return model def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.dummy_unet _lowerCamelCase : Optional[Any] = self.dummy_movq _lowerCamelCase : Optional[int] = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCAmelCase , ) _lowerCamelCase : Optional[int] = { '''unet''': unet, 
'''scheduler''': scheduler, '''movq''': movq, } return components def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=0 ): """simple docstring""" _lowerCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCAmelCase ) if str(__lowerCAmelCase ).startswith('''mps''' ): _lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase ) else: _lowerCamelCase : Any = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowerCamelCase : List[str] = { '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 6_4, '''width''': 6_4, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Optional[Any] = '''cpu''' _lowerCamelCase : Any = self.get_dummy_components() _lowerCamelCase : Union[str, Any] = self.pipeline_class(**__lowerCAmelCase ) _lowerCamelCase : Tuple = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) _lowerCamelCase : List[Any] = output.images _lowerCamelCase : str = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0] _lowerCamelCase : Any = image[0, -3:, -3:, -1] _lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) _lowerCamelCase : int = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' ) _lowerCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCAmelCase ) _lowerCamelCase : Any = KandinskyVaaPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) _lowerCamelCase : int = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = '''red cat, 4k photo''' _lowerCamelCase : Any = torch.Generator(device='''cuda''' ).manual_seed(0 ) _lowerCamelCase , _lowerCamelCase : int = pipe_prior( __lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() _lowerCamelCase : List[Any] = torch.Generator(device='''cuda''' ).manual_seed(0 ) _lowerCamelCase : Dict = pipeline( image_embeds=__lowerCAmelCase , 
negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , output_type='''np''' , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
72
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
1
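Editor's sketch: the Speech2Text processor in the row above dispatches on whether it received audio, text, or both. The stand-alone version below mirrors that control flow with stand-in components; the "labels" key name is an assumption, not taken from the sample.

def process(audio=None, text=None, *, feature_extractor, tokenizer, sampling_rate=None):
    # Either/or dispatch, as in the processor's __call__ above.
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = feature_extractor(audio, sampling_rate=sampling_rate) if audio is not None else None
    encodings = tokenizer(text) if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]  # assumed key: attach tokenized text for training
    return inputs

# Tiny stand-ins so the sketch runs without transformers installed.
fe = lambda a, sampling_rate=None: {"input_features": list(a)}
tok = lambda t: {"input_ids": [ord(c) for c in t]}
print(process(audio=[0.1, 0.2], text="hi", feature_extractor=fe, tokenizer=tok))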
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Swinv2ForImageClassification''', '''Swinv2ForMaskedImageModeling''', '''Swinv2Model''', '''Swinv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
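Editor's sketch: both __init__ samples in the row above guard their imports behind optional-dependency checks. The self-contained version below shows only that control flow; OptionalDependencyNotAvailable and _LazyModule actually live in transformers.utils, the stand-ins here are illustrative.

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None

_import_structure = {"configuration_swinv2": ["Swinv2Config"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # modeling classes are served by dummy objects instead
else:
    _import_structure["modeling_swinv2"] = ["Swinv2Model", "Swinv2PreTrainedModel"]

print(_import_structure)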
"""simple docstring""" import numpy as np import datasets lowerCAmelCase__ = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' lowerCAmelCase__ = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' lowerCAmelCase__ = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __snake_case ( datasets.Metric): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Tuple = np.array(__lowerCAmelCase ) _lowerCamelCase : Any = np.array(__lowerCAmelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction _lowerCamelCase : Dict = X - np.mean(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = np.cov(reference_distribution.T ) try: _lowerCamelCase : Optional[int] = np.linalg.inv(__lowerCAmelCase ) except np.linalg.LinAlgError: _lowerCamelCase : Any = np.linalg.pinv(__lowerCAmelCase ) _lowerCamelCase : List[Any] = np.dot(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[Any] = np.dot(__lowerCAmelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
72
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
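Editor's sketch: the Mahalanobis metric in the row above reduces to a few NumPy lines. The restatement below uses the per-feature mean and a pseudo-inverse (the reference distribution from the sample's docstring has a singular covariance), and reproduces the documented result of 0.5.

import numpy as np

def mahalanobis(X, reference_distribution):
    X = np.atleast_2d(np.asarray(X, dtype=float))
    ref = np.atleast_2d(np.asarray(reference_distribution, dtype=float))
    X_minus_mu = X - ref.mean(axis=0)
    inv_cov = np.linalg.pinv(np.cov(ref.T))  # pinv also handles singular covariances
    left = X_minus_mu @ inv_cov
    return (left @ X_minus_mu.T).diagonal()

print(mahalanobis(X=[[0, 1]], reference_distribution=[[0, 1], [1, 0]]))  # [0.5]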
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class __snake_case ( _lowercase): snake_case__ : torch.FloatTensor snake_case__ : Optional[torch.FloatTensor] = None def snake_case_ ( A_ : List[Any], A_ : Union[str, Any]=0.999, A_ : str="cosine", ): '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(A_ : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A_ : int ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCamelCase : List[str] = [] for i in range(A_ ): _lowerCamelCase : List[str] = i / num_diffusion_timesteps _lowerCamelCase : Any = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A_ ) / alpha_bar_fn(A_ ), A_ ) ) return torch.tensor(A_, dtype=torch.floataa ) class __snake_case ( _lowercase , _lowercase): @register_to_config def __init__( self : int , __lowerCAmelCase : int = 1_0_0_0 , __lowerCAmelCase : str = "fixed_small_log" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[float] = 1.0 , __lowerCAmelCase : str = "epsilon" , __lowerCAmelCase : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) _lowerCamelCase : str = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Any = 1.0 - self.betas _lowerCamelCase : List[str] = torch.cumprod(self.alphas , dim=0 ) _lowerCamelCase : Dict = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Any = 1.0 # setable values _lowerCamelCase : str = None _lowerCamelCase : int = torch.from_numpy(np.arange(0 , __lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : int = variance_type def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ): """simple docstring""" return sample def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, torch.device] = None ): """simple docstring""" _lowerCamelCase : Dict = num_inference_steps _lowerCamelCase : Any = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : str = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : int=None ): """simple docstring""" if prev_timestep is None: _lowerCamelCase : int = t - 1 _lowerCamelCase : int = self.alphas_cumprod[t] _lowerCamelCase : List[str] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Optional[int] = 1 - alpha_prod_t _lowerCamelCase : Optional[int] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Optional[int] = self.betas[t] else: _lowerCamelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) 
and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : Dict = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Tuple = torch.log(torch.clamp(__lowerCAmelCase , min=1E-20 ) ) _lowerCamelCase : Optional[Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : Dict = variance.log() _lowerCamelCase : Optional[int] = beta.log() _lowerCamelCase : Dict = (predicted_variance + 1) / 2 _lowerCamelCase : str = frac * max_log + (1 - frac) * min_log return variance def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : bool = True , ): """simple docstring""" _lowerCamelCase : Dict = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase , _lowerCamelCase : int = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 ) else: _lowerCamelCase : Any = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Any = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : List[str] = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Optional[Any] = self.betas[t] _lowerCamelCase : List[Any] = self.alphas[t] else: _lowerCamelCase : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Any = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : Dict = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Dict = torch.clamp( __lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : Dict = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : int = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Optional[Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase , device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase , predicted_variance=__lowerCAmelCase , prev_timestep=__lowerCAmelCase , ) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[int] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) _lowerCamelCase : Optional[int] = variance * variance_noise _lowerCamelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.IntTensor , ): """simple docstring""" _lowerCamelCase : Dict = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) _lowerCamelCase : Optional[Any] = timesteps.to(original_samples.device ) _lowerCamelCase : str = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : int = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : List[str] = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : List[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Tuple = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
72
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
1
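Editor's sketch: the UnCLIP scheduler in the row above builds its betas from a squared-cosine alpha-bar curve. A plain-math restatement of that constructor follows; max_beta=0.999 and the 0.008 offset come from the sample, the call with 4 steps is only for display.

import math

def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t2) / alpha_bar(t1), clipped so alphas stay positive
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

print(betas_for_alpha_bar(4))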
"""simple docstring""" from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[int] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : Dict ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Dict = ["sentencepiece"] def __init__( self : int , *__lowerCAmelCase : int , **__lowerCAmelCase : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[int] = ["sentencepiece"] def __init__( self : List[str] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Tuple = ["sentencepiece"] def __init__( self : Any , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[str] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[int] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Tuple , *__lowerCAmelCase : int , **__lowerCAmelCase : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[Any] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Union[str, Any] = ["sentencepiece"] def __init__( self : Tuple , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : int = ["sentencepiece"] def __init__( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Dict ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : str = ["sentencepiece"] def __init__( self : List[str] , *__lowerCAmelCase : int , **__lowerCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[str] = ["sentencepiece"] def __init__( self : Dict , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Union[str, Any] = ["sentencepiece"] def __init__( self : str , *__lowerCAmelCase : Dict , **__lowerCAmelCase : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : int = ["sentencepiece"] def __init__( self : Optional[int] , *__lowerCAmelCase : Optional[int] , 
**__lowerCAmelCase : Optional[int] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : int = ["sentencepiece"] def __init__( self : List[str] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Optional[Any] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Tuple = ["sentencepiece"] def __init__( self : List[str] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Optional[int] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Tuple ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[int] = ["sentencepiece"] def __init__( self : Tuple , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Any , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : str = ["sentencepiece"] def __init__( self : int , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : List[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[Any] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Optional[Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Optional[Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[int] = ["sentencepiece"] def __init__( self : Optional[Any] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Tuple ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[Any] = ["sentencepiece"] def __init__( self : Optional[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Any = ["sentencepiece"] def __init__( 
self : Optional[int] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Union[str, Any] = ["sentencepiece"] def __init__( self : Tuple , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : List[str] = ["sentencepiece"] def __init__( self : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __snake_case ( metaclass=_lowercase): snake_case__ : Optional[Any] = ["sentencepiece"] def __init__( self : str , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] )
72
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase 
: Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
1
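Editor's sketch: the TextIteratorStreamer tests in the row above run generation on a worker thread and iterate over streamed chunks on the main thread. The queue-based toy below shows that producer/consumer shape only; it is not the transformers API.

from queue import Queue
from threading import Thread

class TinyIteratorStreamer:
    _stop = object()

    def __init__(self, timeout=None):
        self.queue, self.timeout = Queue(), timeout

    def put(self, text):   # producer pushes each new chunk
        self.queue.put(text)

    def end(self):         # producer signals that generation finished
        self.queue.put(self._stop)

    def __iter__(self):
        while True:
            item = self.queue.get(timeout=self.timeout)  # raises queue.Empty on timeout
            if item is self._stop:
                return
            yield item

def fake_generate(streamer):
    for chunk in ["Hello", " ", "world"]:
        streamer.put(chunk)
    streamer.end()

streamer = TinyIteratorStreamer()
Thread(target=fake_generate, args=(streamer,)).start()
print("".join(streamer))  # Hello world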
"""simple docstring""" lowerCAmelCase__ = { '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''', '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''', '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''', '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''', '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''', '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''', ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''', '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''', '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/''' } # Exclamation mark is not in ITU-R recommendation # fmt: on lowerCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()} def snake_case_ ( A_ : str ): '''simple docstring''' return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def snake_case_ ( A_ : str ): '''simple docstring''' return "".join(REVERSE_DICT[char] for char in message.split() ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = '''Morse code here!''' print(A_ ) _lowerCamelCase : List[str] = encrypt(A_ ) print(A_ ) _lowerCamelCase : Any = decrypt(A_ ) print(A_ ) if __name__ == "__main__": main()
72
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
1
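Editor's sketch: a self-contained round trip of the Morse encrypt/decrypt helpers in the row above, with the table trimmed to three entries so the example stays short.

MORSE_CODE_DICT = {"S": "...", "O": "---", " ": "/"}
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}

def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())

def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())

code = encrypt("SOS SOS")
print(code)           # ... --- ... / ... --- ...
print(decrypt(code))  # SOS SOS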
"""simple docstring""" from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=_lowercase): snake_case__ : Union[str, Any] = ["torch", "torchsde"] def __init__( self : Dict , *__lowerCAmelCase : str , **__lowerCAmelCase : Optional[int] ): """simple docstring""" requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : int , *__lowerCAmelCase : str , **__lowerCAmelCase : str ): """simple docstring""" requires_backends(cls , ['''torch''', '''torchsde'''] )
72
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
1
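Editor's sketch: the tests in the row above check that every torch .bin weight file has a .safetensors counterpart, optionally with an fp16 variant. The function below is an illustrative reimplementation of that rule inferred from the test cases, not the diffusers implementation.

def is_safetensors_compatible(filenames, variant=None):
    suffix = f".{variant}" if variant else ""
    bins = {f for f in filenames if f.endswith(f"{suffix}.bin")}
    safes = {f for f in filenames if f.endswith(f"{suffix}.safetensors")}
    for weight in bins:
        expected = weight[: -len(f"{suffix}.bin")] + f"{suffix}.safetensors"
        alt = expected.replace("pytorch_model", "model")  # pytorch_model.bin -> model.safetensors
        if expected not in safes and alt not in safes:
            return False
    return True

files = ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
print(is_safetensors_compatible(files))      # True
print(is_safetensors_compatible(files[:1]))  # False: .bin without a .safetensors counterpart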
"""simple docstring""" import os import sys import unittest lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCAmelCase__ = os.path.join(git_repo_path, '''src''', '''transformers''') lowerCAmelCase__ = ''' {0} = None ''' lowerCAmelCase__ = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' lowerCAmelCase__ = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(__lowerCAmelCase , '''tokenizers''' ) _lowerCamelCase : Dict = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(__lowerCAmelCase , '''tensorflow_text''' ) _lowerCamelCase : Optional[Any] = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(__lowerCAmelCase , '''sentencepiece_and_tokenizers''' ) _lowerCamelCase : Dict = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(__lowerCAmelCase , '''sentencepiece_and_tensorflow_text''' ) _lowerCamelCase : Tuple = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(__lowerCAmelCase , '''sentencepiece_and_tokenizers_and_vision''' ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Union[str, Any] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __lowerCAmelCase ) self.assertIn('''tensorflow_text''' , __lowerCAmelCase ) self.assertIn('''sentencepiece_and_tokenizers''' , __lowerCAmelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(__lowerCAmelCase , '''\nCONSTANT = None\n''' ) _lowerCamelCase : List[str] = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( __lowerCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) _lowerCamelCase : List[Any] = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' _lowerCamelCase : Tuple = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): 
"""simple docstring""" _lowerCamelCase : List[Any] = '''# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' _lowerCamelCase : str = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , __lowerCAmelCase )
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , 
len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : 
Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : 
List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) _lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] 
, __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, 
-4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
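# Added usage sketch, not part of the original test file: a hedged end-to-end example of how the
# "facebook/maskformer-swin-small-coco" checkpoint exercised above is typically used for semantic
# segmentation. The `post_process_semantic_segmentation` call is an assumption about the
# MaskFormerImageProcessor API in the installed transformers version.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Collapse the per-query masks into a single (height, width) label map at the original size.
    segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(segmentation.shape)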
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) class __snake_case ( _lowercase): snake_case__ : Union[str, Any] = "upernet" def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=5_1_2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : List[Any]=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=0.4 , __lowerCAmelCase : Union[str, Any]=3_8_4 , __lowerCAmelCase : Optional[int]=2_5_6 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=2_5_5 , **__lowerCAmelCase : List[str] , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) _lowerCamelCase : Union[str, Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _lowerCamelCase : Any = backbone_config.get('''model_type''' ) _lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] _lowerCamelCase : int = config_class.from_dict(__lowerCAmelCase ) _lowerCamelCase : List[Any] = backbone_config _lowerCamelCase : Tuple = hidden_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[Any] = pool_scales _lowerCamelCase : List[Any] = use_auxiliary_head _lowerCamelCase : Dict = auxiliary_loss_weight _lowerCamelCase : str = auxiliary_in_channels _lowerCamelCase : Optional[int] = auxiliary_channels _lowerCamelCase : Optional[int] = auxiliary_num_convs _lowerCamelCase : str = auxiliary_concat_input _lowerCamelCase : Dict = loss_ignore_index def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ ) _lowerCamelCase : Union[str, Any] = self.backbone_config.to_dict() _lowerCamelCase : List[Any] = self.__class__.model_type return output
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __snake_case ( _lowercase): snake_case__ : Dict = ["vqvae"] def __init__( self : int , __lowerCAmelCase : AutoencoderKL , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : Mel , __lowerCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ): """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , mel=__lowerCAmelCase , vqvae=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return 5_0 if isinstance(self.scheduler , __lowerCAmelCase ) else 1_0_0_0 @torch.no_grad() def __call__( self : Optional[int] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = None , __lowerCAmelCase : np.ndarray = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = None , __lowerCAmelCase : torch.Generator = None , __lowerCAmelCase : float = 0 , __lowerCAmelCase : float = 0 , __lowerCAmelCase : torch.Generator = None , __lowerCAmelCase : float = 0 , __lowerCAmelCase : torch.Tensor = None , __lowerCAmelCase : torch.Tensor = None , __lowerCAmelCase : Any=True , ): """simple docstring""" _lowerCamelCase : Any = steps or self.get_default_steps() self.scheduler.set_timesteps(__lowerCAmelCase ) _lowerCamelCase : str = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: _lowerCamelCase : Union[str, Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: _lowerCamelCase : Optional[int] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowerCAmelCase , device=self.device , ) _lowerCamelCase : str = noise _lowerCamelCase : List[str] = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = self.mel.audio_slice_to_image(__lowerCAmelCase ) _lowerCamelCase : Any = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) _lowerCamelCase : Optional[Any] = (input_image / 2_5_5) * 2 - 1 _lowerCamelCase : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: _lowerCamelCase : List[str] = self.vqvae.encode(torch.unsqueeze(__lowerCAmelCase , 0 ) ).latent_dist.sample( generator=__lowerCAmelCase )[0] _lowerCamelCase : int = self.vqvae.config.scaling_factor * input_images if start_step > 0: _lowerCamelCase : List[Any] = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , self.scheduler.timesteps[start_step - 1] ) _lowerCamelCase : Dict = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) _lowerCamelCase : Union[str, Any] = int(mask_start_secs * pixels_per_second ) _lowerCamelCase : Union[str, Any] = int(mask_end_secs * pixels_per_second ) _lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in 
enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowerCAmelCase ): _lowerCamelCase : Optional[Any] = self.unet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )['''sample'''] else: _lowerCamelCase : Dict = self.unet(__lowerCAmelCase , __lowerCAmelCase )['''sample'''] if isinstance(self.scheduler , __lowerCAmelCase ): _lowerCamelCase : Optional[Any] = self.scheduler.step( model_output=__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , )['''prev_sample'''] else: _lowerCamelCase : int = self.scheduler.step( model_output=__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , generator=__lowerCAmelCase , )['''prev_sample'''] if mask is not None: if mask_start > 0: _lowerCamelCase : List[Any] = mask[:, step, :, :mask_start] if mask_end > 0: _lowerCamelCase : Optional[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance _lowerCamelCase : List[Any] = 1 / self.vqvae.config.scaling_factor * images _lowerCamelCase : List[str] = self.vqvae.decode(__lowerCAmelCase )['''sample'''] _lowerCamelCase : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 ) _lowerCamelCase : int = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() _lowerCamelCase : Optional[int] = (images * 2_5_5).round().astype('''uint8''' ) _lowerCamelCase : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowerCAmelCase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) _lowerCamelCase : Optional[Any] = [self.mel.image_to_audio(__lowerCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowerCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCAmelCase ) ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Image.Image] , __lowerCAmelCase : int = 5_0 ): """simple docstring""" assert isinstance(self.scheduler , __lowerCAmelCase ) self.scheduler.set_timesteps(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) _lowerCamelCase : Optional[Any] = (sample / 2_5_5) * 2 - 1 _lowerCamelCase : Optional[Any] = torch.Tensor(__lowerCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): _lowerCamelCase : int = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps _lowerCamelCase : Any = self.scheduler.alphas_cumprod[t] _lowerCamelCase : List[str] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) _lowerCamelCase : Any = 1 - alpha_prod_t _lowerCamelCase : Tuple = self.unet(__lowerCAmelCase , __lowerCAmelCase )['''sample'''] _lowerCamelCase : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output _lowerCamelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) _lowerCamelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : float ): """simple docstring""" _lowerCamelCase : List[str] = acos(torch.dot(torch.flatten(__lowerCAmelCase ) , torch.flatten(__lowerCAmelCase ) ) / 
torch.norm(__lowerCAmelCase ) / torch.norm(__lowerCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(__lowerCAmelCase ) + sin(alpha * theta ) * xa / sin(__lowerCAmelCase )
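# Added usage sketch, not part of the original file: a hedged example of running a pipeline with the
# interface defined above through diffusers. The hub id "teticio/audio-diffusion-256" is an assumption,
# and the attribute names on the returned output follow the __call__ body above
# (ImagePipelineOutput provides .images, AudioPipelineOutput provides .audios).
if __name__ == "__main__":
    from diffusers import DiffusionPipeline  # downloading the checkpoint requires network access

    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
    output = pipe(batch_size=1, steps=50)
    print(len(output.images), output.audios.shape)  # one spectrogram image and its decoded audio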
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class __snake_case ( _lowercase): def __init__( self : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ): """simple docstring""" warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
"""simple docstring""" from __future__ import annotations class __snake_case : def __init__( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = text, pattern _lowerCamelCase , _lowerCamelCase : Optional[int] = len(__lowerCAmelCase ), len(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : str = [] for i in range(self.textLen - self.patLen + 1 ): _lowerCamelCase : str = self.mismatch_in_text(__lowerCAmelCase ) if mismatch_index == -1: positions.append(__lowerCAmelCase ) else: _lowerCamelCase : str = self.match_in_pattern(self.text[mismatch_index] ) _lowerCamelCase : str = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowerCAmelCase__ = '''ABAABA''' lowerCAmelCase__ = '''AB''' lowerCAmelCase__ = BoyerMooreSearch(text, pattern) lowerCAmelCase__ = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase 
: Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return 
super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
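# Added usage sketch, not part of the original file: a minimal, hedged example of the standard
# fast-tokenizer interface this class implements. "facebook/bart-base" comes from the pretrained maps
# above; BartTokenizerFast is the upstream transformers name this snippet corresponds to.
if __name__ == "__main__":
    from transformers import BartTokenizerFast  # the first call downloads the tokenizer files

    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    encoded = tokenizer("Hello world")
    print(encoded["input_ids"])  # <s> ... </s> ids, built by the post-processor configured above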
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case ( _lowercase): snake_case__ : str = "new-model" if is_tf_available(): class __snake_case ( _lowercase): snake_case__ : str = NewModelConfig @require_tf class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : str = '''bert-base-cased''' _lowerCamelCase : Tuple = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Tuple = '''bert-base-cased''' _lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase ) _lowerCamelCase , _lowerCamelCase : Any = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): """simple 
docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase ) _lowerCamelCase , _lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Any = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ) _lowerCamelCase , _lowerCamelCase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["bert-base-uncased"]: _lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = TFAutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" for model_name in ["bert-base-uncased"]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : str = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow @require_tensorflow_probability def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _lowerCamelCase : Any = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCAmelCase ) _lowerCamelCase , _lowerCamelCase : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained( __lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , 
__lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_4_4_1_0 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[str] = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_4_4_1_0 ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = copy.deepcopy(model.config ) _lowerCamelCase : Optional[int] = ['''FunnelBaseModel'''] _lowerCamelCase : str = TFAutoModel.from_config(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Dict = TFAutoModel.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" try: AutoConfig.register('''new-model''' , __lowerCAmelCase ) _lowerCamelCase : Dict = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__lowerCAmelCase ): auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCamelCase : List[Any] = BertModelTester(self ).get_config() _lowerCamelCase : Optional[int] = NewModelConfig(**tiny_config.to_dict() ) _lowerCamelCase : Tuple = auto_class.from_config(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = auto_class.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" with self.assertRaisesRegex( __lowerCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ): _lowerCamelCase : Dict = TFAutoModel.from_pretrained('''bert-base''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" with self.assertRaisesRegex( __lowerCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _lowerCamelCase : Union[str, Any] = 
TFAutoModel.from_pretrained(__lowerCAmelCase , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( __lowerCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): _lowerCamelCase : Dict = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" with self.assertRaisesRegex(__lowerCAmelCase , '''Use `from_pt=True` to load this model''' ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: _lowerCamelCase : Dict = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: _lowerCamelCase : Any = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
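# Added usage sketch, not part of the original test file: the custom-model registration pattern
# exercised above, shown in isolation. NewModelConfig / NewModel mirror the names used in the test
# and are illustrative only, which is why the calls are left commented out.
# AutoConfig.register("new-model", NewModelConfig)
# TFAutoModel.register(NewModelConfig, NewModel)
# model = TFAutoModel.from_config(NewModelConfig())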
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder lowerCAmelCase__ = '''base_with_context''' def snake_case_ ( A_ : Optional[Any], A_ : str ): '''simple docstring''' _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : Union[str, Any] = weights[F'''layers_{lyr_num}'''] _lowerCamelCase : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Optional[int] = ly_weight['''attention'''] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def snake_case_ ( A_ : int, A_ : List[Any] ): '''simple docstring''' _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) _lowerCamelCase : List[Any] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : int = weights[F'''layers_{lyr_num}'''] _lowerCamelCase : List[str] = ly_weight['''attention'''] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _lowerCamelCase : int = 
nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def snake_case_ ( A_ : Dict, A_ : str ): '''simple docstring''' _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCamelCase : Dict = weights[F'''layers_{lyr_num}'''] _lowerCamelCase : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = ly_weight['''self_attention'''] _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = ly_weight['''MultiHeadDotProductAttention_0'''] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _lowerCamelCase : int = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def snake_case_ ( A_ : int ): '''simple docstring''' _lowerCamelCase : Any = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCamelCase : Optional[int] = jnp.tree_util.tree_map(onp.array, A_ ) _lowerCamelCase : Dict = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] _lowerCamelCase : List[Any] = os.path.join(args.checkpoint_path, '''..''', 
'''config.gin''' ) _lowerCamelCase : Tuple = inference.parse_training_gin_file(A_, A_ ) _lowerCamelCase : Optional[Any] = inference.InferenceModel(args.checkpoint_path, A_ ) _lowerCamelCase : List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''', variance_type='''fixed_large''' ) _lowerCamelCase : List[Any] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', ) _lowerCamelCase : List[str] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['''targets_context'''], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', ) _lowerCamelCase : Dict = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['''targets_context'''], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, ) _lowerCamelCase : Optional[Any] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''], A_ ) _lowerCamelCase : Any = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''], A_ ) _lowerCamelCase : int = load_decoder(ta_checkpoint['''target''']['''decoder'''], A_ ) _lowerCamelCase : str = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) _lowerCamelCase : Optional[int] = SpectrogramDiffusionPipeline( notes_encoder=A_, continuous_encoder=A_, decoder=A_, scheduler=A_, melgan=A_, ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help='''Path to the original jax model checkpoint.''', ) lowerCAmelCase__ = parser.parse_args() main(args)
72
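The conversion script above repeatedly wraps transposed Flax/T5X kernels in torch.nn.Parameter. A minimal, self-contained sketch of that porting pattern (illustrative only; the array contents and shapes are hypothetical):

import numpy as onp
import torch
import torch.nn as nn

# hypothetical Flax Dense kernel, stored as (in_features, out_features)
flax_kernel = onp.arange(12, dtype=onp.float32).reshape(3, 4)

# torch.nn.Linear keeps its weight as (out_features, in_features), hence the transpose
layer = nn.Linear(3, 4, bias=False)
layer.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))

print(layer(torch.ones(1, 3)).shape)  # torch.Size([1, 4])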
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCAmelCase__ = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } lowerCAmelCase__ = { '''facebook/blenderbot_small-90M''': 512, } class __snake_case ( _lowercase): snake_case__ : List[str] = VOCAB_FILES_NAMES snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : List[Any] = BlenderbotSmallTokenizer def __init__( self : List[str] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : str="<|endoftext|>" , __lowerCAmelCase : List[Any]="<|endoftext|>" , __lowerCAmelCase : Optional[Any]="<|endoftext|>" , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : str=True , **__lowerCAmelCase : List[str] , ): """simple docstring""" super().__init__( ByteLevelBPETokenizer( vocab=__lowerCAmelCase , merges=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , ) , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : List[Any] = add_prefix_space def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : str = [self.sep_token_id] _lowerCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
72
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
1
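A short usage sketch for the weighted-scoring entry above (illustrative; it uses the reconstructed procentual_proximity name from that entry):

# three rows of [price, square_meters]; weight 0 = lower is better, weight 1 = higher is better
vehicles = [[20, 60], [23, 90], [22, 50]]
print(procentual_proximity(vehicles, [0, 1]))
# each row gains a combined score, e.g. the first row becomes [20, 60, 1.25]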
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = 
do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
72
1
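A brief sketch of how a configuration class like the UniSpeech one above is typically used through the transformers API (illustrative; it assumes the public UniSpeechConfig and UniSpeechModel exports):

from transformers import UniSpeechConfig, UniSpeechModel

# build a randomly initialised model from an explicit configuration
config = UniSpeechConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
model = UniSpeechModel(config)
print(model.config.hidden_size)  # 768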
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class __snake_case : def __init__( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=1_3 , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Tuple=9_9 , __lowerCAmelCase : Tuple=3_2 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : List[str]=3_7 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Dict=5_1_2 , __lowerCAmelCase : List[Any]=1_6 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): """simple docstring""" _lowerCamelCase : Dict = parent _lowerCamelCase : Any = 1_3 _lowerCamelCase : List[str] = 7 _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Dict = True _lowerCamelCase : Dict = True _lowerCamelCase : Union[str, Any] = 9_9 _lowerCamelCase : str = 3_2 _lowerCamelCase : Optional[int] = 2 _lowerCamelCase : Union[str, Any] = 4 _lowerCamelCase : Tuple = 3_7 _lowerCamelCase : Optional[int] = '''gelu''' _lowerCamelCase : List[str] = 0.1 _lowerCamelCase : str = 0.1 _lowerCamelCase : List[str] = 5_1_2 _lowerCamelCase : Union[str, Any] = 1_6 _lowerCamelCase : Any = 2 _lowerCamelCase : Any = 0.02 _lowerCamelCase : int = 3 _lowerCamelCase : Optional[Any] = 4 _lowerCamelCase : Optional[Any] = None def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : List[str] = None if self.use_input_mask: _lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[int] = None if self.use_token_type_ids: _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCamelCase : Any = None _lowerCamelCase : Any = None _lowerCamelCase : List[Any] = None if self.use_labels: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Dict = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : str = TFRoFormerModel(config=__lowerCAmelCase ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _lowerCamelCase : Dict = [input_ids, input_mask] _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Tuple = True _lowerCamelCase : Any = TFRoFormerForCausalLM(config=__lowerCAmelCase ) _lowerCamelCase : Tuple = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Any = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) _lowerCamelCase : Tuple = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _lowerCamelCase : Any = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): """simple docstring""" _lowerCamelCase : List[str] = self.num_labels _lowerCamelCase : str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) _lowerCamelCase : Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : List[Any] = self.num_choices _lowerCamelCase : Union[str, Any] = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) 
_lowerCamelCase : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowerCamelCase : List[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowerCamelCase : str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): """simple docstring""" _lowerCamelCase : Tuple = self.num_labels _lowerCamelCase : Dict = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) _lowerCamelCase : List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) _lowerCamelCase : str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _lowerCamelCase : Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : List[str] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Tuple = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case__ : int = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case__ : Tuple = False snake_case__ : Optional[Any] = False def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def SCREAMING_SNAKE_CASE ( 
self : Dict ): """simple docstring""" _lowerCamelCase : Tuple = TFRoFormerModelTester(self ) _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Tuple = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) _lowerCamelCase : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0] # TODO Replace vocab size _lowerCamelCase : int = 5_0_0_0_0 _lowerCamelCase : Optional[int] = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
_lowerCamelCase : str = tf.constant( [ [ [-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46], [-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07], [-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class __snake_case ( unittest.TestCase): snake_case__ : Union[str, Any] = 1e-4 def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = tf.constant([[4, 1_0]] ) _lowerCamelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _lowerCamelCase : Any = emba(input_ids.shape ) _lowerCamelCase : Union[str, Any] = tf.constant( [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[str] = tf.constant( [ [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00], [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17], [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70], ] ) _lowerCamelCase : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) _lowerCamelCase : Optional[Any] = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class __snake_case ( unittest.TestCase): snake_case__ : List[str] = 1e-4 def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : List[str] = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 _lowerCamelCase : Optional[int] = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 _lowerCamelCase : Optional[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) _lowerCamelCase : Optional[Any] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] _lowerCamelCase , _lowerCamelCase : str = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = tf.constant( [ [0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00], [-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43], [-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85], [-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71], [0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80], [3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53], ] ) _lowerCamelCase : Any = tf.constant( [ [0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00], [0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43], [1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85], [2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71], [-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80], [-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
72
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
72
1
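A usage sketch for the URL helper above (illustrative; the repository and file names are made up):

# resolve a dataset file URL on the Hugging Face Hub
url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")
print(url)  # e.g. https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet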
"""simple docstring""" import os import string import sys lowerCAmelCase__ = 1 << 8 lowerCAmelCase__ = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 27, '''up''': 65 + ARROW_KEY_FLAG, '''down''': 66 + ARROW_KEY_FLAG, '''right''': 67 + ARROW_KEY_FLAG, '''left''': 68 + ARROW_KEY_FLAG, '''mod_int''': 91, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 50, '''delete''': 51, '''pg_up''': 53, '''pg_down''': 54, } lowerCAmelCase__ = KEYMAP['''up'''] lowerCAmelCase__ = KEYMAP['''left'''] if sys.platform == "win32": lowerCAmelCase__ = [] lowerCAmelCase__ = { b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(10): lowerCAmelCase__ = ord(str(i)) def snake_case_ ( ): '''simple docstring''' if os.name == "nt": import msvcrt _lowerCamelCase : str = '''mbcs''' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(A_ ) == 0: # Read the keystroke _lowerCamelCase : Optional[Any] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : Optional[int] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : str = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) ) WIN_CH_BUFFER.append(A_ ) if ord(A_ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(1_26 ) ) _lowerCamelCase : str = chr(KEYMAP['''esc'''] ) except KeyError: _lowerCamelCase : List[Any] = cha[1] else: _lowerCamelCase : int = ch.decode(A_ ) else: _lowerCamelCase : Optional[Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty _lowerCamelCase : Optional[Any] = sys.stdin.fileno() _lowerCamelCase : List[Any] = termios.tcgetattr(A_ ) try: tty.setraw(A_ ) _lowerCamelCase : Tuple = sys.stdin.read(1 ) finally: termios.tcsetattr(A_, termios.TCSADRAIN, A_ ) return ch def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = get_raw_chars() if ord(A_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(A_ ) == KEYMAP["esc"]: _lowerCamelCase : Optional[int] = get_raw_chars() if ord(A_ ) == KEYMAP["mod_int"]: _lowerCamelCase : Dict = get_raw_chars() if ord(A_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(A_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(A_ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
72
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
1
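For reference, the identity exercised by the unit tests in the Schur-complement entry above, stated as a note (a standard result, not part of the original file):

S = C - B^{\top} A^{-1} B, \qquad \det\begin{pmatrix} A & B \\ B^{\top} & C \end{pmatrix} = \det(A)\,\det(S)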
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __snake_case ( unittest.TestCase): def __init__( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Dict=1_8 , __lowerCAmelCase : str=3_0 , __lowerCAmelCase : Union[str, Any]=4_0_0 , __lowerCAmelCase : int=True , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , __lowerCAmelCase : Optional[Any]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , __lowerCAmelCase : Optional[int]=True , ): """simple docstring""" _lowerCamelCase : Dict = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _lowerCamelCase : Dict = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} _lowerCamelCase : Dict = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : Optional[Any] = image_size _lowerCamelCase : List[Any] = min_resolution _lowerCamelCase : Optional[Any] = max_resolution _lowerCamelCase : Dict = do_resize _lowerCamelCase : List[Any] = size _lowerCamelCase : Tuple = do_center_crop _lowerCamelCase : List[str] = crop_size _lowerCamelCase : Dict = do_normalize _lowerCamelCase : Optional[Any] = image_mean _lowerCamelCase : Dict = image_std _lowerCamelCase : Optional[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False ): """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: _lowerCamelCase : Optional[Any] = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: _lowerCamelCase : Union[str, Any] = [] for i in range(self.batch_size ): _lowerCamelCase , _lowerCamelCase : Dict = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension _lowerCamelCase : Any = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] if torchify: _lowerCamelCase : List[Any] = [torch.from_numpy(__lowerCAmelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Union[str, Any] = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : Union[str, 
Any] ): """simple docstring""" _lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 2_2_4, '''width''': 2_2_4} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) _lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCamelCase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCamelCase : str = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCamelCase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCamelCase : Optional[int] = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCamelCase : Union[str, Any] = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Any = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__lowerCAmelCase ) _lowerCamelCase : List[Any] = 3 @property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCamelCase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCamelCase : str = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
72
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
1
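A usage sketch for the greedy change-maker above, with a caveat: greedy change-making is only guaranteed optimal for canonical coin systems such as the Indian denominations used in the driver (illustrative addition):

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Non-canonical counter-example: greedy on [1, 3, 4] for 6 returns [4, 1, 1], although [3, 3] uses fewer coins.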
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ): """simple docstring""" self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[str] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCAmelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = None ops.enable_eager_execution_internal() _lowerCamelCase : List[Any] = tf.config.list_physical_devices('''CPU''' ) if len(__lowerCAmelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) _lowerCamelCase : str = tf.config.list_logical_devices(device_type='''CPU''' ) _lowerCamelCase : Union[str, Any] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): _lowerCamelCase : Union[str, Any] = GradientAccumulator() _lowerCamelCase : Dict = tf.Variable([4.0, 3.0] ) _lowerCamelCase , _lowerCamelCase : Tuple = create_optimizer(5E-5 , 1_0 , 5 ) _lowerCamelCase : Tuple = tf.Variable([0.0, 0.0] , trainable=__lowerCAmelCase ) def accumulate_on_replica(__lowerCAmelCase : Optional[int] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ): with strategy.scope(): _lowerCamelCase : Optional[int] = strategy.experimental_local_results(__lowerCAmelCase ) local_variables[0].assign(__lowerCAmelCase ) local_variables[1].assign(__lowerCAmelCase ) strategy.run(__lowerCAmelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCAmelCase ) def _check_local_values(__lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ): _lowerCamelCase : str = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCAmelCase , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCAmelCase , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , 
tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
72
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
1
"""simple docstring""" def snake_case_ ( A_ : int, A_ : int ): '''simple docstring''' return int(input_a == input_a == 0 ) def snake_case_ ( ): '''simple docstring''' print('''Truth Table of NOR Gate:''' ) print('''| Input 1 | Input 2 | Output |''' ) print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' ) print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' ) print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' ) print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
72
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
1
"""simple docstring""" from importlib import import_module from .logging import get_logger lowerCAmelCase__ = get_logger(__name__) class __snake_case : def __init__( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=None ): """simple docstring""" _lowerCamelCase : Tuple = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) _lowerCamelCase : Any = module._original_module if isinstance(__lowerCAmelCase , _PatchedModuleObj ) else module class __snake_case : snake_case__ : Dict = [] def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Optional[Any] = obj _lowerCamelCase : int = target _lowerCamelCase : int = new _lowerCamelCase : Tuple = target.split('''.''' )[0] _lowerCamelCase : Optional[Any] = {} _lowerCamelCase : Optional[int] = attrs or [] def __enter__( self : Optional[Any] ): """simple docstring""" *_lowerCamelCase , _lowerCamelCase : Optional[int] = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__lowerCAmelCase ) ): try: _lowerCamelCase : Union[str, Any] = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _lowerCamelCase : Dict = getattr(self.obj , __lowerCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__lowerCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _lowerCamelCase : Dict = obj_attr # patch at top level setattr(self.obj , __lowerCAmelCase , _PatchedModuleObj(__lowerCAmelCase , attrs=self.attrs ) ) _lowerCamelCase : Tuple = getattr(self.obj , __lowerCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__lowerCAmelCase , __lowerCAmelCase , _PatchedModuleObj(getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , attrs=self.attrs ) ) _lowerCamelCase : int = getattr(__lowerCAmelCase , __lowerCAmelCase ) # finally set the target attribute setattr(__lowerCAmelCase , __lowerCAmelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _lowerCamelCase : Optional[int] = getattr(import_module('''.'''.join(__lowerCAmelCase ) ) , __lowerCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , __lowerCAmelCase ) is attr_value: _lowerCamelCase : str = getattr(self.obj , __lowerCAmelCase ) setattr(self.obj , __lowerCAmelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" _lowerCamelCase : List[str] = globals()['''__builtins__'''][target_attr] setattr(self.obj , __lowerCAmelCase , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self : Any , *__lowerCAmelCase : Dict ): """simple docstring""" for attr in list(self.original ): setattr(self.obj , __lowerCAmelCase , self.original.pop(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.__enter__() self._active_patches.append(self ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
72
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
1
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Dict = BlenderbotSmallTokenizer snake_case__ : Tuple = False def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" super().setUp() _lowerCamelCase : Dict = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _lowerCamelCase : List[Any] = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _lowerCamelCase : Tuple = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _lowerCamelCase : Tuple = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any , **__lowerCAmelCase : Dict ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : List[str] = '''adapt act apte''' _lowerCamelCase : Union[str, Any] = '''adapt act apte''' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _lowerCamelCase : Optional[int] = '''adapt act apte''' _lowerCamelCase : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] _lowerCamelCase : Tuple = tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _lowerCamelCase : str = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [1_3_8_4] _lowerCamelCase : Optional[int] = '''I am a small frog.''' _lowerCamelCase : Optional[int] = tok([src_text] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )['''input_ids'''] _lowerCamelCase : str = tok.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _lowerCamelCase : Dict = '''I am a small frog .''' _lowerCamelCase : Tuple = '''.''' _lowerCamelCase : List[str] = tok(__lowerCAmelCase )['''input_ids'''] _lowerCamelCase : Dict = tok(__lowerCAmelCase )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''', '''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __snake_case ( _lowercase): snake_case__ : str = "xlm-roberta-xl" def __init__( self : Optional[int] , __lowerCAmelCase : int=2_5_0_8_8_0 , __lowerCAmelCase : Union[str, Any]=2_5_6_0 , __lowerCAmelCase : Tuple=3_6 , __lowerCAmelCase : str=3_2 , __lowerCAmelCase : int=1_0_2_4_0 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=5_1_4 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : str=1E-05 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Dict="absolute" , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Any , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = max_position_embeddings _lowerCamelCase : int = type_vocab_size _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = position_embedding_type _lowerCamelCase : Dict = use_cache _lowerCamelCase : Union[str, Any] = classifier_dropout class __snake_case ( _lowercase): @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" if self.task == "multiple-choice": _lowerCamelCase : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _lowerCamelCase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
72
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __snake_case ( _lowercase): snake_case__ : Dict = "Salesforce/blip-image-captioning-base" snake_case__ : Dict = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) snake_case__ : Tuple = "image_captioner" snake_case__ : Optional[Any] = AutoModelForVisionaSeq snake_case__ : List[str] = ["image"] snake_case__ : int = ["text"] def __init__( self : Optional[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : Dict ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : "Image" ): """simple docstring""" return self.pre_processor(images=__lowerCAmelCase , return_tensors='''pt''' ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Any ): """simple docstring""" return self.model.generate(**__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Tuple ): """simple docstring""" return self.pre_processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )[0].strip()
72
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase__ = { '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase 
: Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
1
"""simple docstring""" class __snake_case : def __init__( self : Optional[int] ): """simple docstring""" _lowerCamelCase : dict[str, TrieNode] = {} # Mapping from char to TrieNode _lowerCamelCase : Tuple = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : list[str] ): """simple docstring""" for word in words: self.insert(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : List[Any] = self for char in word: if char not in curr.nodes: _lowerCamelCase : List[str] = TrieNode() _lowerCamelCase : Union[str, Any] = curr.nodes[char] _lowerCamelCase : Any = True def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : List[Any] = self for char in word: if char not in curr.nodes: return False _lowerCamelCase : Union[str, Any] = curr.nodes[char] return curr.is_leaf def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str ): """simple docstring""" def _delete(__lowerCAmelCase : TrieNode , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> bool: if index == len(__lowerCAmelCase ): # If word does not exist if not curr.is_leaf: return False _lowerCamelCase : List[Any] = False return len(curr.nodes ) == 0 _lowerCamelCase : List[Any] = word[index] _lowerCamelCase : Optional[Any] = curr.nodes.get(__lowerCAmelCase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted _lowerCamelCase : Union[str, Any] = _delete(__lowerCAmelCase , __lowerCAmelCase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , __lowerCAmelCase , 0 ) def snake_case_ ( A_ : TrieNode, A_ : str ): '''simple docstring''' if node.is_leaf: print(A_, end=''' ''' ) for key, value in node.nodes.items(): print_words(A_, word + key ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Optional[Any] = '''banana bananas bandana band apple all beast'''.split() _lowerCamelCase : List[str] = TrieNode() root.insert_many(A_ ) # print_words(root, "") assert all(root.find(A_ ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def snake_case_ ( A_ : str, A_ : bool ): '''simple docstring''' print(str(A_ ), '''works!''' if passes else '''doesn\'t work :(''' ) def snake_case_ ( ): '''simple docstring''' assert test_trie() def snake_case_ ( ): '''simple docstring''' print_results('''Testing trie functionality''', test_trie() ) if __name__ == "__main__": main()
72
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
1
"""simple docstring""" import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''', } class __snake_case ( _lowercase): snake_case__ : Dict = "align_text_model" def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=3_0_5_2_2 , __lowerCAmelCase : Optional[int]=7_6_8 , __lowerCAmelCase : Dict=1_2 , __lowerCAmelCase : Optional[int]=1_2 , __lowerCAmelCase : Optional[Any]=3_0_7_2 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Any=5_1_2 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : List[str]=1E-12 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[int]="absolute" , __lowerCAmelCase : Optional[Any]=True , **__lowerCAmelCase : Any , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : List[str] = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : List[Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : int = max_position_embeddings _lowerCamelCase : Optional[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : List[str] = layer_norm_eps _lowerCamelCase : List[Any] = position_embedding_type _lowerCamelCase : Union[str, Any] = use_cache _lowerCamelCase : Any = pad_token_id @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase , _lowerCamelCase : List[str] = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": _lowerCamelCase : int = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class __snake_case ( _lowercase): snake_case__ : Optional[Any] = "align_vision_model" def __init__( self : int , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 6_0_0 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __lowerCAmelCase : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2_5_6_0 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.0_01 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[str] , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : str = image_size _lowerCamelCase : int = width_coefficient _lowerCamelCase : str = depth_coefficient _lowerCamelCase : int = depth_divisor _lowerCamelCase : Union[str, Any] = kernel_sizes _lowerCamelCase : Optional[Any] = in_channels _lowerCamelCase : Optional[Any] = out_channels _lowerCamelCase : List[Any] = depthwise_padding _lowerCamelCase : str = strides _lowerCamelCase : List[str] = num_block_repeats _lowerCamelCase : List[str] = expand_ratios _lowerCamelCase : Any = squeeze_expansion_ratio _lowerCamelCase : Any = hidden_act _lowerCamelCase : List[Any] = hidden_dim _lowerCamelCase : Dict = pooling_type _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : Optional[int] = batch_norm_eps _lowerCamelCase : Dict = batch_norm_momentum _lowerCamelCase : List[Any] = drop_connect_rate _lowerCamelCase : int = sum(__lowerCAmelCase ) * 4 @classmethod def SCREAMING_SNAKE_CASE ( cls : str , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Tuple ): """simple docstring""" cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase , _lowerCamelCase : Dict = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": _lowerCamelCase : Dict = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class __snake_case ( _lowercase): snake_case__ : Tuple = "align" snake_case__ : List[str] = True def __init__( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Union[str, Any]=6_4_0 , __lowerCAmelCase : int=1.0 , __lowerCAmelCase : List[str]=0.02 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) if text_config is None: _lowerCamelCase : Optional[Any] = {} logger.info('''text_config is None. 
Initializing the AlignTextConfig with default values.''' ) if vision_config is None: _lowerCamelCase : List[str] = {} logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' ) _lowerCamelCase : List[str] = AlignTextConfig(**__lowerCAmelCase ) _lowerCamelCase : Any = AlignVisionConfig(**__lowerCAmelCase ) _lowerCamelCase : Any = projection_dim _lowerCamelCase : Any = temperature_init_value _lowerCamelCase : Any = initializer_range @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , __lowerCAmelCase : AlignTextConfig , __lowerCAmelCase : AlignVisionConfig , **__lowerCAmelCase : Optional[int] ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ ) _lowerCamelCase : List[str] = self.text_config.to_dict() _lowerCamelCase : Tuple = self.vision_config.to_dict() _lowerCamelCase : Union[str, Any] = self.__class__.model_type return output
72
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
1
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME lowerCAmelCase__ = ['''small''', '''medium''', '''large'''] lowerCAmelCase__ = '''lm_head.decoder.weight''' lowerCAmelCase__ = '''lm_head.weight''' def snake_case_ ( A_ : str, A_ : str ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = torch.load(A_ ) _lowerCamelCase : Optional[Any] = d.pop(A_ ) os.makedirs(A_, exist_ok=A_ ) torch.save(A_, os.path.join(A_, A_ ) ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) lowerCAmelCase__ = parser.parse_args() for MODEL in DIALOGPT_MODELS: lowerCAmelCase__ = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""") lowerCAmelCase__ = F"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
72
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , 
len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : 
Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : 
List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) _lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] 
, __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, 
-4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
1
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def snake_case_ ( A_ : Optional[Any] ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Tuple = np.max(_outputs, axis=-1, keepdims=A_ ) _lowerCamelCase : str = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=A_ ) class __snake_case ( _lowercase): snake_case__ : Tuple = "sigmoid" snake_case__ : str = "softmax" snake_case__ : Tuple = "none" @add_end_docstrings( _lowercase , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class __snake_case ( _lowercase): snake_case__ : Dict = False snake_case__ : List[str] = ClassificationFunction.NONE def __init__( self : List[str] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" super().__init__(**__lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : List[str]="" , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : Any = tokenizer_kwargs _lowerCamelCase : List[Any] = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: _lowerCamelCase : str = self.model.config.return_all_scores if isinstance(__lowerCAmelCase , __lowerCAmelCase ) or top_k is None: _lowerCamelCase : Tuple = top_k _lowerCamelCase : List[str] = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , __lowerCAmelCase , ) if return_all_scores: _lowerCamelCase : Dict = None else: _lowerCamelCase : Any = 1 if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _lowerCamelCase : str = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: _lowerCamelCase : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : Tuple , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = super().__call__(*__lowerCAmelCase , **__lowerCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_lowerCamelCase : Dict = '''top_k''' not in kwargs if isinstance(args[0] , __lowerCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.framework if isinstance(__lowerCAmelCase , __lowerCAmelCase ): return self.tokenizer(**__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] , __lowerCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" return self.model(**__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=1 , __lowerCAmelCase : Dict=True ): """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: _lowerCamelCase : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: _lowerCamelCase : Optional[int] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: _lowerCamelCase : List[str] = self.model.config.function_to_apply else: _lowerCamelCase : List[Any] = ClassificationFunction.NONE _lowerCamelCase : Union[str, Any] = model_outputs['''logits'''][0] _lowerCamelCase : Dict = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: _lowerCamelCase : Dict = sigmoid(__lowerCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: _lowerCamelCase : Tuple = softmax(__lowerCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: _lowerCamelCase : str = outputs else: raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} _lowerCamelCase : Tuple = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(__lowerCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __lowerCAmelCase : x["score"] , reverse=__lowerCAmelCase ) if top_k is not None: _lowerCamelCase : Tuple = dict_scores[:top_k] return dict_scores
72
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case ( unittest.TestCase): @property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : str = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = self.dummy_uncond_unet _lowerCamelCase : str = ScoreSdeVeScheduler() _lowerCamelCase : Union[str, Any] = ScoreSdeVePipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) sde_ve.to(__lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = torch.manual_seed(0 ) _lowerCamelCase : Union[str, Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__lowerCAmelCase ).images _lowerCamelCase : int = torch.manual_seed(0 ) _lowerCamelCase : Dict = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__lowerCAmelCase , return_dict=__lowerCAmelCase )[ 0 ] _lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] _lowerCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _lowerCamelCase : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = '''google/ncsnpp-church-256''' _lowerCamelCase : int = UNetaDModel.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : List[str] = ScoreSdeVeScheduler.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = ScoreSdeVePipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) sde_ve.to(__lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : str = torch.manual_seed(0 ) _lowerCamelCase : int = sde_ve(num_inference_steps=1_0 , output_type='''numpy''' , generator=__lowerCAmelCase ).images _lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) _lowerCamelCase : str = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
72
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __snake_case ( _lowercase): snake_case__ : str = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) snake_case__ : Any = "CIDAS/clipseg-rd64-refined" snake_case__ : Union[str, Any] = "image_segmenter" snake_case__ : Union[str, Any] = CLIPSegForImageSegmentation snake_case__ : int = ["image", "text"] snake_case__ : List[str] = ["image"] def __init__( self : str , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : int ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : "Image" , __lowerCAmelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=__lowerCAmelCase , return_tensors='''pt''' ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Optional[Any] ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : List[Any] = self.model(**__lowerCAmelCase ).logits return logits def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : str = outputs.cpu().detach().numpy() _lowerCamelCase : Dict = 0 _lowerCamelCase : Any = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
72
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
1
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( _lowercase): def __init__( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str=7_6_8 ): """simple docstring""" super().__init__(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = proj_size _lowerCamelCase : Optional[int] = CLIPVisionModel(__lowerCAmelCase ) _lowerCamelCase : Tuple = PaintByExampleMapper(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = nn.LayerNorm(config.hidden_size ) _lowerCamelCase : Any = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling _lowerCamelCase : int = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple=False ): """simple docstring""" _lowerCamelCase : int = self.model(pixel_values=__lowerCAmelCase ) _lowerCamelCase : int = clip_output.pooler_output _lowerCamelCase : Optional[Any] = self.mapper(latent_states[:, None] ) _lowerCamelCase : List[str] = self.final_layer_norm(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = self.proj_out(__lowerCAmelCase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class __snake_case ( nn.Module): def __init__( self : int , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__() _lowerCamelCase : Optional[Any] = (config.num_hidden_layers + 1) // 5 _lowerCamelCase : Optional[Any] = config.hidden_size _lowerCamelCase : List[str] = 1 _lowerCamelCase : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , activation_fn='''gelu''' , attention_bias=__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ): """simple docstring""" for block in self.blocks: _lowerCamelCase : Union[str, Any] = block(__lowerCAmelCase ) return hidden_states
72
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase 
: Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return 
super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
72
1
"""simple docstring""" # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class __snake_case ( _lowercase): def __init__( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ): """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self : List[Any] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : int = 5_0 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Tuple , ): """simple docstring""" _lowerCamelCase : int = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__lowerCAmelCase , ) _lowerCamelCase : Union[str, Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _lowerCamelCase : List[str] = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _lowerCamelCase : List[str] = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample _lowerCamelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCamelCase : List[str] = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=__lowerCAmelCase ), "This is a local test"
72
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
1
"""simple docstring""" import pprint import requests lowerCAmelCase__ = '''https://zenquotes.io/api''' def snake_case_ ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case_ ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": lowerCAmelCase__ = random_quotes() pprint.pprint(response)
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
1
"""simple docstring""" from typing import Any def snake_case_ ( A_ : list ): '''simple docstring''' if not input_list: return [] _lowerCamelCase : Dict = [input_list.count(A_ ) for value in input_list] _lowerCamelCase : Union[str, Any] = max(A_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(A_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
72
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
1
"""simple docstring""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCAmelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCAmelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = ''' Hello world! cécé herlolip''' lowerCAmelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def snake_case_ ( A_ : str ): '''simple docstring''' _lowerCamelCase : Optional[Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : str, A_ : Optional[Any], A_ : Tuple ): '''simple docstring''' _lowerCamelCase : str = dct.pop(A_ ) _lowerCamelCase : List[Any] = val def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' ) _lowerCamelCase : Optional[int] = torch.hub.load('''pytorch/fairseq''', '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def snake_case_ ( A_ : str ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Union[str, Any] = emb.weight.shape _lowerCamelCase : Any = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : Optional[Any] = emb.weight.data return lin_layer @torch.no_grad() def snake_case_ ( A_ : Optional[Any], A_ : List[str], A_ : Union[str, Any]=None ): '''simple docstring''' if not os.path.exists(A_ ): _lowerCamelCase : List[Any] = torch.hub.load('''pytorch/fairseq''', A_ ).eval() else: _lowerCamelCase : Optional[int] = load_xsum_checkpoint(A_ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: _lowerCamelCase : int = checkpoint_path.replace('''.''', '''-''' ) _lowerCamelCase : List[str] = BartConfig.from_pretrained(A_ ) _lowerCamelCase : Any = bart.encode(A_ ).unsqueeze(0 ) _lowerCamelCase : Any = BartTokenizer.from_pretrained(A_ ).encode(A_, return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(A_, A_ ).all(): raise ValueError( F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": _lowerCamelCase : str = bart.state_dict() remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(A_, A_, A_ ) _lowerCamelCase : Tuple = BartForSequenceClassification(A_ ).eval() model.load_state_dict(A_ ) _lowerCamelCase : str = bart.predict('''mnli''', A_, return_logits=A_ ) _lowerCamelCase : List[Any] = model(A_ )[0] # logits else: # no classification heads to worry about _lowerCamelCase : Any = bart.model.state_dict() 
remove_ignore_keys_(A_ ) _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Dict = bart.extract_features(A_ ) if hf_checkpoint_name == "facebook/bart-large": _lowerCamelCase : str = BartModel(A_ ).eval() model.load_state_dict(A_ ) _lowerCamelCase : Dict = model(A_ ).model[0] else: _lowerCamelCase : str = BartForConditionalGeneration(A_ ).eval() # an existing summarization ckpt model.model.load_state_dict(A_ ) if hasattr(A_, '''lm_head''' ): _lowerCamelCase : Tuple = make_linear_from_emb(model.model.shared ) _lowerCamelCase : str = model.model(A_ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCAmelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
72
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = 
do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
72
1
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Optional[int] = SpeechTaTokenizer snake_case__ : List[Any] = False snake_case__ : List[str] = True def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Optional[Any] = SpeechTaTokenizer(__lowerCAmelCase ) _lowerCamelCase : Any = AddedToken('''<mask>''' , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = '''this is a test''' _lowerCamelCase : int = '''this is a test''' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=2_0 , __lowerCAmelCase : int=5 ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.get_input_output_texts(__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) _lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) return text, ids def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = '''<pad>''' _lowerCamelCase : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__lowerCAmelCase ) , 8_1 ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 7_9 ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Tuple = self.get_tokenizers(do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _lowerCamelCase : Dict = tokenizer.vocab_size _lowerCamelCase : List[Any] = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _lowerCamelCase : int = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] _lowerCamelCase : Dict = tokenizer.add_tokens(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = 
                _lowerCamelCase : Union[str, Any] = tokenizer.vocab_size
                _lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )

                self.assertNotEqual(__lowerCAmelCase , 0 )
                self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
                self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) )

                _lowerCamelCase : int = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowerCAmelCase )

                self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )

                _lowerCamelCase : List[Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                _lowerCamelCase : int = tokenizer.add_special_tokens(__lowerCAmelCase )
                _lowerCamelCase : Optional[Any] = tokenizer.vocab_size
                _lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )

                self.assertNotEqual(__lowerCAmelCase , 0 )
                self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
                self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) )

                _lowerCamelCase : str = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowerCAmelCase )

                self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE ( self : int ):
        """simple docstring"""
        _lowerCamelCase : Optional[int] = self.get_tokenizer()

        _lowerCamelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
        # fmt: off
        self.assertListEqual(__lowerCAmelCase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )

        _lowerCamelCase : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowerCAmelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )

        _lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
        # fmt: off
        self.assertListEqual(__lowerCAmelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
        # fmt: on

        _lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
        self.assertListEqual(
            __lowerCAmelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE,
'''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off _lowerCamelCase : str = { '''input_ids''': [ [4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2], [4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowerCAmelCase , )
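# A minimal usage sketch for the tokenizer exercised above (assumptions: the public
# class in `transformers` is `SpeechT5Tokenizer`, which the mangled `SpeechTaTokenizer`
# corresponds to, and the `microsoft/speecht5_asr` checkpoint referenced in the
# integration test is available; requires `sentencepiece`):
from transformers import SpeechT5Tokenizer

_example_tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
_example_ids = _example_tokenizer.encode("The quick brown fox jumps over the lazy dog.")
print(_example_ids)  # character-level ids, ending in the EOS id
print(_example_tokenizer.decode(_example_ids, skip_special_tokens=True))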
72
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
72
1