Dataset schema (five columns per row):

| Column                  | Type   | Range              |
|-------------------------|--------|--------------------|
| code                    | string | lengths 86 – 54.5k |
| code_codestyle          | int64  | 0 – 371            |
| style_context           | string | lengths 87 – 49.2k |
| style_context_codestyle | int64  | 0 – 349            |
| label                   | int64  | 0 – 1              |
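Each row pairs a `code` snippet with a `style_context` snippet, records a style-cluster id for each, and carries a binary `label`. As a minimal sketch of how rows with this schema could be consumed, assuming the table were exported to a local JSON Lines file (the `code_style.jsonl` path, the export format, and the label semantics noted in the comment are illustrative assumptions, not part of the dump):

```python
# Hypothetical consumer for rows with the schema above.
# Assumes the rows were exported to "code_style.jsonl" (one JSON object per line).
import json

with open("code_style.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        code, context = row["code"], row["style_context"]
        # Assumed semantics: label == 1 when both snippets share the same code style.
        print(row["code_codestyle"], row["style_context_codestyle"], row["label"], len(code), len(context))
```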
Row 1 — `code` (code_codestyle = 367):

```python
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase : Dict = logging.get_logger(__name__)

lowercase : Dict = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class A ( __snake_case ):
    __magic_name__ = '''canine'''

    def __init__(
        self ,
        SCREAMING_SNAKE_CASE=768 ,
        SCREAMING_SNAKE_CASE=12 ,
        SCREAMING_SNAKE_CASE=12 ,
        SCREAMING_SNAKE_CASE=3072 ,
        SCREAMING_SNAKE_CASE="gelu" ,
        SCREAMING_SNAKE_CASE=0.1 ,
        SCREAMING_SNAKE_CASE=0.1 ,
        SCREAMING_SNAKE_CASE=16384 ,
        SCREAMING_SNAKE_CASE=16 ,
        SCREAMING_SNAKE_CASE=0.02 ,
        SCREAMING_SNAKE_CASE=1e-12 ,
        SCREAMING_SNAKE_CASE=0 ,
        SCREAMING_SNAKE_CASE=0XE_000 ,
        SCREAMING_SNAKE_CASE=0XE_001 ,
        SCREAMING_SNAKE_CASE=4 ,
        SCREAMING_SNAKE_CASE=4 ,
        SCREAMING_SNAKE_CASE=8 ,
        SCREAMING_SNAKE_CASE=16384 ,
        SCREAMING_SNAKE_CASE=128 ,
        **SCREAMING_SNAKE_CASE ,
    ) -> int:
        """simple docstring"""
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

        A : List[str] = max_position_embeddings
        A : Any = hidden_size
        A : List[str] = num_hidden_layers
        A : Any = num_attention_heads
        A : Dict = intermediate_size
        A : str = hidden_act
        A : Optional[int] = hidden_dropout_prob
        A : Tuple = attention_probs_dropout_prob
        A : str = initializer_range
        A : List[str] = type_vocab_size
        A : List[Any] = layer_norm_eps

        # Character config:
        A : Optional[int] = downsampling_rate
        A : int = upsampling_kernel_size
        A : Any = num_hash_functions
        A : List[str] = num_hash_buckets
        A : int = local_transformer_stride
```
Row 1 — `style_context` (style_context_codestyle = 311):

```python
'''simple docstring'''
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class A ( __snake_case ):
    __magic_name__ = (UniPCMultistepScheduler,)
    __magic_name__ = (('''num_inference_steps''', 25),)

    def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        A : str = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }

        config.update(**SCREAMING_SNAKE_CASE )
        return config

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : List[Any] = dict(self.forward_default_kwargs )
        A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
        A : Optional[Any] = self.dummy_sample
        A : int = 0.1 * sample
        A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals
            A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE )
                A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]

            A, A : Tuple = sample, sample
            for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
                A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
                A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Optional[Any] = dict(self.forward_default_kwargs )
        A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
        A : List[Any] = self.dummy_sample
        A : int = 0.1 * sample
        A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            A : Optional[int] = self.get_scheduler_config()
            A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

            # copy over dummy past residuals (must be after setting timesteps)
            A : int = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE )
                A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

                # copy over dummy past residual (must be after setting timesteps)
                A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]

            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
            A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        if scheduler is None:
            A : Dict = self.scheduler_classes[0]
            A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : Tuple = self.scheduler_classes[0]
        A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
        A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : int = 10
        A : Tuple = self.dummy_model()
        A : Any = self.dummy_sample_deter
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        return sample

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Tuple = dict(self.forward_default_kwargs )
        A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )

        for scheduler_class in self.scheduler_classes:
            A : Dict = self.get_scheduler_config()
            A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )

            A : Optional[Any] = self.dummy_sample
            A : Optional[int] = 0.1 * sample

            if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
            elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                A : Tuple = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
            A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]

            A : List[Any] = scheduler.timesteps[5]
            A : Dict = scheduler.timesteps[6]

            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
        A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
        A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

        A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
        A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )

        A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
        A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=SCREAMING_SNAKE_CASE ,
                            prediction_type=SCREAMING_SNAKE_CASE ,
                            sample_max_value=SCREAMING_SNAKE_CASE ,
                            solver_order=SCREAMING_SNAKE_CASE ,
                            solver_type=SCREAMING_SNAKE_CASE ,
                        )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=SCREAMING_SNAKE_CASE ,
                        solver_type=SCREAMING_SNAKE_CASE ,
                        prediction_type=SCREAMING_SNAKE_CASE ,
                    )
                    A : Dict = self.full_loop(
                        solver_order=SCREAMING_SNAKE_CASE ,
                        solver_type=SCREAMING_SNAKE_CASE ,
                        prediction_type=SCREAMING_SNAKE_CASE ,
                    )
                    assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
        self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : int = self.full_loop()
        A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
        A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.1_014 ) < 1e-3

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Dict = self.scheduler_classes[0]
        A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
        A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : Tuple = 10
        A : Union[str, Any] = self.dummy_model()
        A : Dict = self.dummy_sample_deter.half()
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        assert sample.dtype == torch.floataa

    def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
```

Row 1 — `style_context_codestyle`: 311
Row 1 — `label`: 0
Row 2 — `code` (code_codestyle = 368):

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowercase : Union[str, Any] = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Union[str, Any] = [
        'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegatronBertForCausalLM',
        'MegatronBertForMaskedLM',
        'MegatronBertForMultipleChoice',
        'MegatronBertForNextSentencePrediction',
        'MegatronBertForPreTraining',
        'MegatronBertForQuestionAnswering',
        'MegatronBertForSequenceClassification',
        'MegatronBertForTokenClassification',
        'MegatronBertModel',
        'MegatronBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
```
Row 2 — `style_context` (style_context_codestyle = 311):

```python
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class A ( __snake_case ):
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        A : Dict = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__(
        self ,
        SCREAMING_SNAKE_CASE = 1 ,
        SCREAMING_SNAKE_CASE = None ,
        SCREAMING_SNAKE_CASE = 0.0 ,
        SCREAMING_SNAKE_CASE = 50 ,
        SCREAMING_SNAKE_CASE = None ,
        SCREAMING_SNAKE_CASE = "pil" ,
        SCREAMING_SNAKE_CASE = True ,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ):
            A : List[Any] = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            A : int = self.scheduler.step(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE
            ).prev_sample

        A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
```

Row 2 — `style_context_codestyle`: 311
Row 2 — `label`: 0
Row 3 — `code` (code_codestyle = 369):

```python
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase : Union[str, Any] = logging.get_logger(__name__)

lowercase : Optional[Any] = {
    'google/realm-cc-news-pretrained-embedder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-encoder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-scorer': (
        'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
    ),
    'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
    'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
    'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
    'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class A ( __snake_case ):
    __magic_name__ = '''realm'''

    def __init__(
        self ,
        SCREAMING_SNAKE_CASE=30522 ,
        SCREAMING_SNAKE_CASE=768 ,
        SCREAMING_SNAKE_CASE=128 ,
        SCREAMING_SNAKE_CASE=12 ,
        SCREAMING_SNAKE_CASE=12 ,
        SCREAMING_SNAKE_CASE=8 ,
        SCREAMING_SNAKE_CASE=3072 ,
        SCREAMING_SNAKE_CASE="gelu_new" ,
        SCREAMING_SNAKE_CASE=0.1 ,
        SCREAMING_SNAKE_CASE=0.1 ,
        SCREAMING_SNAKE_CASE=512 ,
        SCREAMING_SNAKE_CASE=2 ,
        SCREAMING_SNAKE_CASE=0.02 ,
        SCREAMING_SNAKE_CASE=1e-12 ,
        SCREAMING_SNAKE_CASE=256 ,
        SCREAMING_SNAKE_CASE=10 ,
        SCREAMING_SNAKE_CASE=1e-3 ,
        SCREAMING_SNAKE_CASE=5 ,
        SCREAMING_SNAKE_CASE=320 ,
        SCREAMING_SNAKE_CASE=13353718 ,
        SCREAMING_SNAKE_CASE=5000 ,
        SCREAMING_SNAKE_CASE=1 ,
        SCREAMING_SNAKE_CASE=0 ,
        SCREAMING_SNAKE_CASE=2 ,
        **SCREAMING_SNAKE_CASE ,
    ) -> Any:
        """simple docstring"""
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

        # Common config
        A : int = vocab_size
        A : int = max_position_embeddings
        A : str = hidden_size
        A : List[Any] = retriever_proj_size
        A : Tuple = num_hidden_layers
        A : int = num_attention_heads
        A : Optional[int] = num_candidates
        A : Optional[Any] = intermediate_size
        A : Any = hidden_act
        A : int = hidden_dropout_prob
        A : Optional[Any] = attention_probs_dropout_prob
        A : List[Any] = initializer_range
        A : Tuple = type_vocab_size
        A : Optional[Any] = layer_norm_eps

        # Reader config
        A : Union[str, Any] = span_hidden_size
        A : Tuple = max_span_width
        A : Dict = reader_layer_norm_eps
        A : Optional[int] = reader_beam_size
        A : Union[str, Any] = reader_seq_len

        # Retrieval config
        A : List[str] = num_block_records
        A : str = searcher_beam_size
```
Row 3 — `style_context` (style_context_codestyle = 311):

```python
'''simple docstring'''
from __future__ import annotations

from random import random


class A :
    def __init__( self , SCREAMING_SNAKE_CASE = None ) -> Tuple:
        """simple docstring"""
        A : Optional[Any] = value
        A : Any = random()
        A : Node | None = None
        A : Node | None = None

    def __repr__( self ) -> str:
        """simple docstring"""
        from pprint import pformat

        if self.left is None and self.right is None:
            return F'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )

    def __str__( self ) -> str:
        """simple docstring"""
        A : Optional[Any] = str(self.value ) + ''' '''
        A : Union[str, Any] = str(self.left or '''''' )
        A : Any = str(self.right or '''''' )
        return value + left + right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            A, A : Any = split(root.left , snake_case__ )
            return left, root
        else:
            A, A : Optional[int] = split(root.right , snake_case__ )
            return root, right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        A : List[str] = merge(left.right , snake_case__ )
        return left
    else:
        A : Tuple = merge(snake_case__ , right.left )
        return right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : List[Any] = Node(snake_case__ )
    A, A : Tuple = split(snake_case__ , snake_case__ )
    return merge(merge(snake_case__ , snake_case__ ) , snake_case__ )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A, A : Dict = split(snake_case__ , value - 1 )
    A, A : Any = split(snake_case__ , snake_case__ )
    return merge(snake_case__ , snake_case__ )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            A : int = insert(snake_case__ , int(arg[1:] ) )
        elif arg[0] == "-":
            A : int = erase(snake_case__ , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Union[str, Any] = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )

    A : Optional[int] = input()
    while args != "q":
        A : str = interact_treap(snake_case__ , snake_case__ )
        print(snake_case__ )
        A : Union[str, Any] = input()

    print('''good by!''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
```

Row 3 — `style_context_codestyle`: 311
Row 3 — `label`: 0
Row 4 — `code` (code_codestyle = 370):

```python
'''simple docstring'''
import math

import tensorflow as tf
from packaging import version


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Optional[Any] = tf.convert_to_tensor(snake_case__ )
    A : List[Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))

    return x * cdf


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Any = tf.convert_to_tensor(snake_case__ )
    A : Dict = tf.cast(math.pi , x.dtype )
    A : Optional[Any] = tf.cast(0.04_47_15 , x.dtype )
    A : List[str] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(snake_case__ , 3 )) ))

    return x * cdf


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Optional[int] = tf.convert_to_tensor(snake_case__ )

    return x * tf.tanh(tf.math.softplus(snake_case__ ) )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Union[str, Any] = tf.convert_to_tensor(snake_case__ )
    A : List[Any] = tf.cast(0.04_47_15 , x.dtype )
    A : int = tf.cast(0.79_78_84_56_08 , x.dtype )

    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Any = tf.convert_to_tensor(snake_case__ )
    A : List[Any] = tf.cast(1.7_02 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    return tf.clip_by_value(_gelu(snake_case__ ) , -10 , 10 )


def lowerCAmelCase_ ( snake_case__ , snake_case__=-1 ):
    '''simple docstring'''
    A : Optional[Any] = tf.split(snake_case__ , 2 , axis=snake_case__ )

    return a * tf.math.sigmoid(snake_case__ )


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def lowerCAmelCase_ ( snake_case__ ):
        '''simple docstring'''
        return tf.keras.activations.gelu(snake_case__ , approximate=snake_case__ )

    lowercase : Union[str, Any] = tf.keras.activations.gelu
    lowercase : str = approximate_gelu_wrap
else:
    lowercase : Tuple = _gelu
    lowercase : Optional[int] = _gelu_new

lowercase : str = {
    'gelu': gelu,
    'gelu_10': gelu_aa,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
```
Row 4 — `style_context` (style_context_codestyle = 311):

```python
'''simple docstring'''
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]:
        """simple docstring"""
        A : Tuple = '''bilinear'''
        A : Optional[int] = max_size
        A : Dict = short_edge_length

    def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Tuple = []
        for img in imgs:
            A, A : str = img.shape[:2]
            # later: provide list and randomly choose index for resize
            A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            if h < w:
                A, A : Tuple = size, scale * w
            else:
                A, A : str = scale * h, size
            if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size:
                A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                A : Tuple = newh * scale
                A : int = neww * scale
            A : List[str] = int(neww + 0.5 )
            A : int = int(newh + 0.5 )

            if img.dtype == np.uinta:
                A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE )
                A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                A : str = np.asarray(SCREAMING_SNAKE_CASE )
            else:
                A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # 3, 0, 1)  # hw(c) -> nchw
                A : List[Any] = nn.functional.interpolate(
                    SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 )
            img_augs.append(SCREAMING_SNAKE_CASE )

        return img_augs


class A :
    def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        A : str = cfg.INPUT.FORMAT
        A : int = cfg.SIZE_DIVISIBILITY
        A : Optional[int] = cfg.PAD_VALUE
        A : Dict = cfg.INPUT.MAX_SIZE_TEST
        A : Optional[Any] = cfg.MODEL.DEVICE
        A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) )
        A : List[str] = [im.shape[-2:] for im in images]
        A : Optional[Any] = [
            nn.functional.pad(
                SCREAMING_SNAKE_CASE ,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,
                value=self.pad_value ,
            )
            for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        ]

        return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE )

    def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
        """simple docstring"""
        with torch.no_grad():
            if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                A : str = [images]
            if single_image:
                assert len(SCREAMING_SNAKE_CASE ) == 1
            for i in range(len(SCREAMING_SNAKE_CASE ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        SCREAMING_SNAKE_CASE ,
                        torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() ,
                    )
            # resize smallest edge
            A : Tuple = torch.tensor([im.shape[:2] for im in images] )
            A : Dict = self.aug(SCREAMING_SNAKE_CASE )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images]
            # now pad them to do the following operations
            A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE )
            # Normalize

            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!"
    A, A : str = box_size
    tensor[:, 0].clamp_(min=0 , max=snake_case__ )
    tensor[:, 1].clamp_(min=0 , max=snake_case__ )
    tensor[:, 2].clamp_(min=0 , max=snake_case__ )
    tensor[:, 3].clamp_(min=0 , max=snake_case__ )
```

Row 4 — `style_context_codestyle`: 311
Row 4 — `label`: 0
Row 5 — `code` (code_codestyle = 371):

```python
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    return x if y == 0 else greatest_common_divisor(snake_case__ , x % y )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    return (x * y) // greatest_common_divisor(snake_case__ , snake_case__ )


def lowerCAmelCase_ ( snake_case__ = 20 ):
    '''simple docstring'''
    A : Any = 1
    for i in range(1 , n + 1 ):
        A : int = lcm(snake_case__ , snake_case__ )
    return g


if __name__ == "__main__":
    print(f'''{solution() = }''')
```
Row 5 — `style_context` (style_context_codestyle = 311):

```python
'''simple docstring'''
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    lowercase : Tuple = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '--original_config_file',
        default=None,
        type=str,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--scheduler_type',
        default='pndm',
        type=str,
        help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
    )
    parser.add_argument(
        '--pipeline_type',
        default=None,
        type=str,
        help=(
            'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
            '. If `None` pipeline will be automatically inferred.'
        ),
    )
    parser.add_argument(
        '--image_size',
        default=None,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--prediction_type',
        default=None,
        type=str,
        help=(
            'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
            ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    parser.add_argument(
        '--stable_unclip',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
    )
    parser.add_argument(
        '--stable_unclip_prior',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
    )
    parser.add_argument(
        '--clip_stats_path',
        type=str,
        help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
        required=False,
    )
    parser.add_argument(
        '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
    )
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--vae_path',
        type=str,
        default=None,
        required=False,
        help='Set to a path, hub id to an already converted vae to not convert it again.',
    )
    lowercase : Tuple = parser.parse_args()

    lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.floataa)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
```

Row 5 — `style_context_codestyle`: 311
Row 5 — `label`: 0
Row 6 — `code` (code_codestyle = 350):

```python
'''simple docstring'''
import os
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class A ( __snake_case , unittest.TestCase ):
    __magic_name__ = BertTokenizer
    __magic_name__ = BertTokenizerFast
    __magic_name__ = True
    __magic_name__ = True
    __magic_name__ = filter_non_english

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        super().setUp()

        A : int = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        A : Optional[Any] = '''UNwant\u00E9d,running'''
        A : Optional[Any] = '''unwanted, running'''
        return input_text, output_text

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : Any = self.tokenizer_class(self.vocab_file )

        A : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [9, 6, 7, 12, 10, 11] )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        A : List[str] = self.get_tokenizer()
        A : str = self.get_rust_tokenizer()

        A : List[str] = '''UNwant\u00E9d,running'''

        A : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        A : Union[str, Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        A : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        A : Any = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        A : List[Any] = self.get_rust_tokenizer()
        A : str = tokenizer.encode(SCREAMING_SNAKE_CASE )
        A : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        # With lower casing
        A : List[str] = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
        A : Dict = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE )

        A : Union[str, Any] = '''UNwant\u00E9d,running'''

        A : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        A : Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        A : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        A : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        A : Dict = self.get_rust_tokenizer()
        A : Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE )
        A : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : Optional[Any] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Tuple = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        A : List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Optional[int] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : List[str] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : Union[str, Any] = BasicTokenizer()
        A : Optional[Any] = '''a\n\'ll !!to?\'d of, can\'t.'''
        A : Dict = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
        self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']

        A : List[str] = {}
        for i, token in enumerate(SCREAMING_SNAKE_CASE ):
            A : int = i
        A : Any = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE , unk_token='''[UNK]''' )

        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )

        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )

        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )

        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        self.assertTrue(_is_control('''\u0005''' ) )

        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )

        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : int = self.get_tokenizer()
        A : Any = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

        self.assertListEqual(
            [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    @slow
    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Union[str, Any] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )

        A : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE )
        A : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE )

        A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE )
        A : List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

                A : Dict = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                A : Optional[int] = tokenizer_r.encode_plus(
                    SCREAMING_SNAKE_CASE ,
                    return_attention_mask=SCREAMING_SNAKE_CASE ,
                    return_token_type_ids=SCREAMING_SNAKE_CASE ,
                    return_offsets_mapping=SCREAMING_SNAKE_CASE ,
                    add_special_tokens=SCREAMING_SNAKE_CASE ,
                )

                A : Optional[Any] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE , '''do_lower_case''' ) else False
                A : Dict = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Union[str, Any] = ['''的''', '''人''', '''有''']
        A : str = ''''''.join(SCREAMING_SNAKE_CASE )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                A : List[Any] = True
                A : List[str] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
                A : Dict = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

                A : Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                A : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )

                A : Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
                A : Tuple = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

                A : Any = False
                A : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
                A : Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

                A : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                A : int = tokenizer_p.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )

                A : int = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
                A : Tuple = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )

                # it is expected that only the first Chinese character is not preceded by "##".
                A : List[str] = [
                    F'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE )
                ]
                self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
```
Row 6 — `style_context` (style_context_codestyle = 311):

```python
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


lowercase : str = datasets.utils.logging.get_logger(__name__)

lowercase : Union[str, Any] = ['names', 'prefix']
lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowercase : List[Any] = ['encoding_errors', 'on_bad_lines']
lowercase : Any = ['date_format']


@dataclass
class A ( datasets.BuilderConfig ):
    __magic_name__ = ","
    __magic_name__ = None
    __magic_name__ = "infer"
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = True
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = True
    __magic_name__ = True
    __magic_name__ = False
    __magic_name__ = True
    __magic_name__ = None
    __magic_name__ = "."
    __magic_name__ = None
    __magic_name__ = '"'
    __magic_name__ = 0
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = True
    __magic_name__ = True
    __magic_name__ = 0
    __magic_name__ = True
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = 10000
    __magic_name__ = None
    __magic_name__ = "strict"
    __magic_name__ = "error"
    __magic_name__ = None

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        if self.delimiter is not None:
            A : Optional[Any] = self.delimiter
        if self.column_names is not None:
            A : Optional[Any] = self.column_names

    @property
    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        A : str = {
            '''sep''': self.sep,
            '''header''': self.header,
            '''names''': self.names,
            '''index_col''': self.index_col,
            '''usecols''': self.usecols,
            '''prefix''': self.prefix,
            '''mangle_dupe_cols''': self.mangle_dupe_cols,
            '''engine''': self.engine,
            '''converters''': self.converters,
            '''true_values''': self.true_values,
            '''false_values''': self.false_values,
            '''skipinitialspace''': self.skipinitialspace,
            '''skiprows''': self.skiprows,
            '''nrows''': self.nrows,
            '''na_values''': self.na_values,
            '''keep_default_na''': self.keep_default_na,
            '''na_filter''': self.na_filter,
            '''verbose''': self.verbose,
            '''skip_blank_lines''': self.skip_blank_lines,
            '''thousands''': self.thousands,
            '''decimal''': self.decimal,
            '''lineterminator''': self.lineterminator,
            '''quotechar''': self.quotechar,
            '''quoting''': self.quoting,
            '''escapechar''': self.escapechar,
            '''comment''': self.comment,
            '''encoding''': self.encoding,
            '''dialect''': self.dialect,
            '''error_bad_lines''': self.error_bad_lines,
            '''warn_bad_lines''': self.warn_bad_lines,
            '''skipfooter''': self.skipfooter,
            '''doublequote''': self.doublequote,
            '''memory_map''': self.memory_map,
            '''float_precision''': self.float_precision,
            '''chunksize''': self.chunksize,
            '''encoding_errors''': self.encoding_errors,
            '''on_bad_lines''': self.on_bad_lines,
            '''date_format''': self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class A ( datasets.ArrowBasedBuilder ):
    __magic_name__ = CsvConfig

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        A : int = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ):
            A : str = data_files
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                A : int = [files]
            A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        A : Tuple = []
        for split_name, files in data_files.items():
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                A : List[str] = [files]
            A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files]
            splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) )
        return splits

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            A : Optional[int] = self.config.features.arrow_schema
            if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ):
                # cheaper cast
                A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        return pa_table

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        A : int = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ):
            A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ):
                    A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE )
            except ValueError as e:
                logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' )
                raise
```

Row 6 — `style_context_codestyle`: 311
Row 6 — `label`: 0
Row 7 — `code` (remaining fields truncated in the source dump):

```python
'''simple docstring'''
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=True ):
    '''simple docstring'''
    model.train()
    A : Union[str, Any] = model(snake_case__ )
    A : List[str] = F.mse_loss(snake_case__ , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(snake_case__ )


def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
    '''simple docstring'''
    set_seed(42 )
    A : Optional[Any] = RegressionModel()
    A : Union[str, Any] = deepcopy(snake_case__ )
    A : Optional[Any] = RegressionDataset(length=80 )
    A : Any = DataLoader(snake_case__ , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        A : int = AdamW(params=model.parameters() , lr=1E-3 )
        A : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        A : Optional[Any] = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
        A : Tuple = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        A : Tuple = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
    else:
        A : Optional[Any] = accelerator.prepare(snake_case__ , snake_case__ )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Dict = get_training_setup(snake_case__ )
    # Use a single batch
    A : List[str] = next(iter(snake_case__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        A : Tuple = accelerator.gather((ddp_input, ddp_target) )
        A : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case__ ):
                step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        else:
            # Sync grads
            step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad
            ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        A : List[Any] = ddp_input[torch.randperm(len(snake_case__ ) )]


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Optional[Any] = get_training_setup(snake_case__ )
    # Use a single batch
    A : Tuple = next(iter(snake_case__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        A : Tuple = accelerator.gather((ddp_input, ddp_target) )
        A : Dict = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case__ ):
                step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        else:
            # Sync grads
            step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        A : Union[str, Any] = ddp_input[torch.randperm(len(snake_case__ ) )]


def lowerCAmelCase_ ( snake_case__=False , snake_case__=False ):
    '''simple docstring'''
    A : int = Accelerator(
        split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    A : Optional[int] = get_training_setup(snake_case__ )
    for iteration, batch in enumerate(snake_case__ ):
        A : str = batch.values()
        # Gather the distributed inputs and targs for the base model
        A : Tuple = accelerator.gather((ddp_input, ddp_target) )
        A : str = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(snake_case__ ):
            step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        A : Any = ddp_input[torch.randperm(len(snake_case__ ) )]
    GradientState._reset_state()


def lowerCAmelCase_ ( snake_case__=False , snake_case__=False ):
    '''simple docstring'''
    A : Any = Accelerator(
        split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    A : Tuple = get_training_setup(snake_case__ , snake_case__ )
    for iteration, batch in enumerate(snake_case__ ):
        A : List[Any] = batch.values()
        # Gather the distributed inputs and targs for the base model
        A : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) )
        A : Any = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(snake_case__ ):
            step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        A : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
        if accelerator.num_processes > 1:
            check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Any = Accelerator()

    A : Any = RegressionDataset(length=80 )
    A : Union[str, Any] = DataLoader(snake_case__ , batch_size=16 )
    A : Optional[int] = RegressionDataset(length=96 )
    A : Optional[Any] = DataLoader(snake_case__ , batch_size=16 )
    A : List[Any] = accelerator.prepare(snake_case__ , snake_case__ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(snake_case__ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
        if iteration < len(snake_case__ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(snake_case__ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
                    if batch_num < len(snake_case__ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Tuple = Accelerator()
    A : int = accelerator.state
    if state.local_process_index == 0:
        print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('''**Test NOOP `no_sync` context manager**''' )
        test_noop_sync(snake_case__ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('''**Test Distributed `no_sync` context manager**''' )
        test_distributed_sync(snake_case__ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in
```
[True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(snake_case__ , snake_case__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' main() if __name__ == "__main__": main()
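# Illustrative sketch (not part of the test file above): the minimal training-loop
# pattern these tests exercise. The model, data, and hyperparameters are toy
# placeholders; only the `accelerator.accumulate` usage mirrors the tests.
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(16, 1), torch.randn(16, 1))
dataloader = DataLoader(dataset, batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside `accumulate`, gradient synchronization is skipped until the
    # configured number of steps has elapsed (or the dataloader ends).
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()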
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
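# Quick illustration (not from the file above) of the closing property: it
# multiplies the convolutional strides together, i.e. the overall audio
# downsampling ratio. With the default strides from the signature above:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame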
311
0
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( A ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( 
self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = 
(BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( A ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], 
[0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
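# Illustrative sketch mirroring the slow integration test above (the input ids
# and the expected [1, 8, 1024] shape are taken from that test); it requires
# network access to download the checkpoint.
import torch

from transformers import BertGenerationEncoder

model = BertGenerationEncoder.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # torch.Size([1, 8, 1024])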
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
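# Standalone illustration (with a made-up dimension) of the qkv split the
# conversion above performs: timm fuses query/key/value into one matrix, while
# the Hugging Face checkpoint keeps them as three separate projections.
import torch

dim = 96  # illustrative all_head_size
qkv_weight = torch.randn(3 * dim, dim)

query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)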
311
0
'''simple docstring'''
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    '''simple docstring'''
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        # Place each value in the bucket selected by its offset from the minimum.
        buckets[int(i - min_value)].append(i)

    # Sort each bucket individually and concatenate the results in order.
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
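# Illustrative property check (not part of the original file): the result
# should agree with Python's built-in sort on random float inputs.
import random

data = [random.uniform(-100, 100) for _ in range(1_000)]
assert bucket_sort(data) == sorted(data)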
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
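# Illustrative sketch of composing the nested configs defined above, written
# against the public transformers names (the classes in this dump are
# anonymized as `A`); the kwargs are the same ones accepted by the signatures above.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers)  # 2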
311
0
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    '''simple docstring'''
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    '''simple docstring'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
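# Illustrative sanity check (not part of the original file): compare against
# scikit-learn's reference k-NN on the same split; the two can disagree when
# several neighbors tie on distance.
from sklearn.neighbors import KNeighborsClassifier

sk_clf = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
point = [4.4, 3.1, 1.3, 1.4]
print(classes[sk_clf.predict([point])[0]])
print(classifier(X_train, y_train, classes, point))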
354
'''simple docstring'''
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    # Trial division: strip out each factor while i * i <= n.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
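# Illustrative usage of the trial-division factorization above.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # a prime returns itself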
311
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowercase : str = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase : Union[str, Any] = { 'vocab_file': { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt', }, 'tokenizer_file': { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json' ), 'google/realm-orqa-nq-openqa': ( 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json' ), 'google/realm-orqa-nq-reader': ( 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json' ), 'google/realm-orqa-wq-openqa': ( 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json' ), 'google/realm-orqa-wq-reader': ( 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json' ), }, } lowercase : str = { 'google/realm-cc-news-pretrained-embedder': 5_12, 'google/realm-cc-news-pretrained-encoder': 5_12, 'google/realm-cc-news-pretrained-scorer': 5_12, 'google/realm-cc-news-pretrained-openqa': 5_12, 'google/realm-orqa-nq-openqa': 5_12, 'google/realm-orqa-nq-reader': 5_12, 'google/realm-orqa-wq-openqa': 5_12, 'google/realm-orqa-wq-reader': 5_12, } lowercase : List[Any] = { 'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True}, 'google/realm-orqa-nq-openqa': {'do_lower_case': True}, 'google/realm-orqa-nq-reader': {'do_lower_case': True}, 'google/realm-orqa-wq-openqa': {'do_lower_case': True}, 'google/realm-orqa-wq-reader': {'do_lower_case': True}, } class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_INIT_CONFIGURATION 
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RealmTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): A : List[Any] = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) ) A : Dict = do_lower_case A : Union[str, Any] = strip_accents A : Tuple = tokenize_chinese_chars A : Union[str, Any] = normalizer_class(**SCREAMING_SNAKE_CASE ) A : str = do_lower_case def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Union[str, Any] = PaddingStrategy.MAX_LENGTH A : str = text A : Optional[int] = kwargs.pop('''text_pair''' , SCREAMING_SNAKE_CASE ) A : int = kwargs.pop('''return_tensors''' , SCREAMING_SNAKE_CASE ) A : int = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE ): if batch_text_pair is not None: A : List[str] = batch_text_pair[idx] else: A : Any = None A : Optional[Any] = super().__call__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : int = encoded_candidates.get('''input_ids''' ) A : List[Any] = encoded_candidates.get('''attention_mask''' ) A : List[Any] = encoded_candidates.get('''token_type_ids''' ) if encoded_input_ids is not None: output_data["input_ids"].append(SCREAMING_SNAKE_CASE ) if encoded_attention_mask is not None: output_data["attention_mask"].append(SCREAMING_SNAKE_CASE ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE ) A : List[Any] = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE ) != 0} return BatchEncoding(SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Any: """simple docstring""" A : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" A : int = 
self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE )
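# Illustrative sketch of the candidate-batching entry point implemented above
# (every candidate is padded to `max_length`), written against the public
# transformers name for this tokenizer; requires network access.
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
candidates = [["Hello world!", "Nice to meet you!"]]
batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([1, 2, 10]) -> (batch, num_candidates, max_length)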
355
'''simple docstring'''
# Function to print upper half of diamond (pyramid)


def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
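# Illustrative call and its output for n = 3 (trailing spaces omitted):
pretty_print(3)
#   *
#  * *
# * * *
# * * *
#  * *
#   *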
311
0
'''simple docstring'''
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        '''simple docstring'''
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_x, self.target.pos_y, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node) -> Path:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
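# Compact heap-based variant of the same search (illustrative, not part of the
# original file): the class above re-sorts the whole open list every iteration,
# while a priority queue pops the node with the smallest Manhattan heuristic in
# O(log n). Positions are (row, col) and 0 marks a free cell, as in the grid above.
import heapq

def greedy_best_first(grid, start, goal):
    def h(pos):  # Manhattan distance to the goal
        return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

    frontier = [(h(start), start, [start])]
    seen = {start}
    while frontier:
        _, (y, x), path = heapq.heappop(frontier)
        if (y, x) == goal:
            return path
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = y + dy, x + dx
            if (
                0 <= ny < len(grid)
                and 0 <= nx < len(grid[0])
                and grid[ny][nx] == 0
                and (ny, nx) not in seen
            ):
                seen.add((ny, nx))
                heapq.heappush(frontier, (h((ny, nx)), (ny, nx), path + [(ny, nx)]))
    return None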
356
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
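# Illustrative driver (not part of the fixture): instantiate the pipeline with
# a tiny randomly initialized UNet and a DDPM scheduler. The UNet sizes and
# block types are assumptions chosen only to keep the demo fast.
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=8,
    in_channels=3,
    out_channels=3,
    block_out_channels=(8, 16),
    norm_num_groups=8,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = CustomLocalPipeline(unet, DDPMScheduler())
output, message = pipe(num_inference_steps=2, output_type="numpy")
print(message, output.images.shape)  # "This is a local test" (1, 8, 8, 3)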
311
0
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=[1, 16, 4, 4] , SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]: """simple docstring""" A : Any = parent A : str = batch_size A : Optional[Any] = image_size A : Dict = patch_size A : List[str] = num_channels A : Optional[int] = is_training A : List[str] = use_labels A : Tuple = hidden_size A : Optional[int] = num_hidden_layers A : Optional[Any] = num_attention_heads A : int = intermediate_size A : Union[str, Any] = hidden_act A : Dict = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : Dict = type_sequence_label_size A : List[Any] = initializer_range A : int = scope A : Dict = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A : Tuple = (self.image_size // 32) ** 2 A : Union[str, Any] = num_patches + 1 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Union[str, Any] = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Union[str, Any] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , 
backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Optional[int] = ViTHybridModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Union[str, Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Optional[int] = self.type_sequence_label_size A : Dict = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Dict = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Union[str, Any] = self.prepare_config_and_inputs() A : Tuple = config_and_inputs A : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[int] = ViTHybridModelTester(self ) A : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : List[Any] = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : int = model_class(SCREAMING_SNAKE_CASE ) A : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Tuple = [*signature.parameters.keys()] A : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : Union[str, Any] = 
_config_zero_init(SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: A : Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A : List[Any] = [F'{name}.{key}' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[int] = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( SCREAMING_SNAKE_CASE ) A : List[str] = self.default_image_processor A : Any = prepare_img() A : List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : List[str] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow @require_accelerate def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[int] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) A : Union[str, Any] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' ) A : Any = prepare_img() A : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) A : List[Any] = outputs.logits # model predicts one of the 1000 ImageNet classes A : Any = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A, A, A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , 
unittest.TestCase ): __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , 
SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring'''
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits=3):
    '''simple docstring'''
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=1_0000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
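# --- Editor's sketch (not part of the original dataset row) -------------------
# Sanity check for the function above: the QFT maps the all-zeros state |000>
# to a uniform superposition, so with 10000 shots each of the 2**3 = 8 measured
# outcomes should land near 10000 / 8 = 1250. The +/-300 window (roughly nine
# standard deviations of the binomial count, sqrt(10000 * 1/8 * 7/8) ~= 33) is
# an assumption chosen to make spurious failures unlikely.
if __name__ == "__main__":
    counts = quantum_fourier_transform(3)
    assert len(counts) == 8
    for state, hits in counts.items():
        assert abs(hits - 1250) < 300, f"outcome {state} looks non-uniform: {hits}"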
358
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
311
0
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowercase : Any = _symbol_database.Default() lowercase : int = _descriptor_pool.Default().AddSerializedFile( B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) lowercase : Optional[int] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowercase : List[Any] = None lowercase : Tuple = B'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowercase : List[str] = 45 lowercase : List[Any] = 15_81 lowercase : List[str] = 15_17 lowercase : List[Any] = 15_70 lowercase : Optional[int] = 15_84 lowercase : Any = 17_93 lowercase : Any = 17_95 lowercase : Tuple = 19_16 lowercase : Any = 18_64 lowercase : List[Any] = 19_05 lowercase : str = 19_19 lowercase : Any = 24_29 lowercase : Optional[int] = 22_08 lowercase : Dict = 24_18 lowercase : Tuple = 23_23 lowercase : Tuple = 24_07 # @@protoc_insertion_point(module_scope)
359
'''simple docstring'''
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product="laptop"):
    '''simple docstring'''
    url = F'https://www.amazon.in/laptop/s?k={product}'
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # NOTE (restoration): the assignment targets of the next three
        # statements were lost in the source dump; the row append and the two
        # blank-cell fills below are reconstructed and should be treated as an
        # editor's assumption.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] == "", "Current Price of the product"
        ] = " "
        data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f'Amazon Product Data for {product}.csv')
311
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Any = tempfile.mkdtemp() # fmt: off A : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A : Tuple = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } A : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : int = self.get_tokenizer() A : Union[str, Any] = self.get_image_processor() A : List[str] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) A : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Any = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A : Optional[int] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) A : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Dict = self.get_image_processor() A : List[Any] = self.get_tokenizer() A : str = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) A : List[str] = self.prepare_image_inputs() A : int = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : List[Any] = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = self.get_image_processor() A : int = self.get_tokenizer() A : Tuple = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) A : Dict = '''lower newer''' A : Tuple = processor(text=SCREAMING_SNAKE_CASE ) A : List[Any] = tokenizer(SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = self.get_image_processor() A : int = self.get_tokenizer() A : Any = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) A : Dict = '''lower newer''' A : Any = self.prepare_image_inputs() A : str = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(SCREAMING_SNAKE_CASE ): processor() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[str] = self.get_image_processor() A : Optional[Any] = self.get_tokenizer() A : Tuple = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) A : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : List[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE ) A : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = self.get_image_processor() A : Union[str, Any] = self.get_tokenizer() A : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) A : Tuple = '''lower newer''' A : Optional[Any] = self.prepare_image_inputs() A : Tuple = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
360
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x, y, max_step):
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    '''simple docstring'''
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
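# --- Editor's sketch (not part of the original dataset row) -------------------
# Quick check of the escape-time helper above: the origin c = 0 + 0j lies
# inside the Mandelbrot set, so get_distance never diverges, runs all max_step
# iterations, and returns step / (max_step - 1) == 1.0, which
# get_black_and_white_rgb maps to black.
assert get_distance(0.0, 0.0, 50) == 1.0
assert get_black_and_white_rgb(get_distance(0.0, 0.0, 50)) == (0, 0, 0)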
311
0
'''simple docstring''' import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]: """simple docstring""" A : Optional[int] = parent A : Optional[Any] = batch_size A : Optional[int] = seq_length A : Optional[Any] = is_training A : Tuple = use_input_mask A : Any = use_token_type_ids A : Union[str, Any] = use_labels A : List[str] = vocab_size A : Any = hidden_size A : Union[str, Any] = embedding_size A : Dict = num_hidden_layers A : Tuple = num_attention_heads A : Dict = intermediate_size A : Optional[int] = hidden_act A : Optional[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : List[str] = max_position_embeddings A : Tuple = type_vocab_size A : str = type_sequence_label_size A : Any = initializer_range A : Union[str, Any] = num_labels A : List[Any] = num_choices A : Union[str, Any] = scope def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : int = None if self.use_input_mask: A : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : int = None if self.use_token_type_ids: A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Union[str, Any] = None A : List[Any] = None A : Tuple = None if self.use_labels: A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Any = ids_tensor([self.batch_size] , self.num_choices ) A : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = MobileBertModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Optional[Any] = MobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Dict = MobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : int = MobileBertForPreTraining(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , next_sentence_label=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : str = MobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : int = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[str] = self.num_labels A : Dict = MobileBertForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Any = self.num_labels A : str = MobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = self.num_choices A : Tuple = MobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Union[str, Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[str] = self.prepare_config_and_inputs() ( A ) : Dict = config_and_inputs A : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> str: """simple docstring""" A : int = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE ): A : Optional[Any] = torch.zeros( (self.model_tester.batch_size, 
self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) A : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = MobileBertModelTester(self ) A : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return torch.tensor( snake_case__ , dtype=torch.long , device=snake_case__ , ) lowercase : Dict = 1e-3 @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(SCREAMING_SNAKE_CASE ) A : Optional[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): A : List[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Union[str, Any] = torch.tensor( [ [ [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05], [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00], [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01], ] ] , device=SCREAMING_SNAKE_CASE , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. 
We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound)
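# --- Editor's sketch (not part of the original dataset row) -------------------
# A standalone illustration, with made-up tensors, of the relative-bound check
# used in the test above: when values span ~1e0 to 1e8, an absolute tolerance
# is meaningless, so the ratio expected / actual is bounded around 1 instead.
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0, 1e4, 1e8])
actual = expected * (1 + 5e-4)  # 0.05% relative error at every scale
ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)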
361
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
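# --- Editor's sketch (not part of the original dataset row) -------------------
# The locate-a-package trick used above, shown in isolation: import a module
# and resolve the directory it was loaded from. "json" is only an example of
# an importable module; any installed package works the same way.
import importlib
from pathlib import Path

json_module = importlib.import_module("json")
package_dir = Path(json_module.__file__).parent
print(package_dir)  # e.g. /usr/lib/python3.11/json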
311
0
'''simple docstring''' import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowercase : List[Any] = datasets.logging.get_logger(__name__) lowercase : Optional[Any] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' lowercase : Union[str, Any] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' lowercase : int = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__="dummy_doc" ): '''simple docstring''' A : str = {doc: key_lines} A : Dict = {doc: sys_lines} A : Optional[int] = {} A : List[str] = 0 A : int = 0 A : Dict = 0 A : Union[str, Any] = 0 A : Dict = 0 A : str = 0 A : List[str] = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__ ) key_singletons_num += singletons_num if NP_only or min_span: A : Any = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__ ) A : int = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__ ) sys_singletons_num += singletons_num if NP_only or min_span: A : Dict = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__ ) if remove_nested: A : List[str] = reader.remove_nested_coref_mentions(snake_case__ , snake_case__ ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters A : Tuple = reader.remove_nested_coref_mentions(snake_case__ , snake_case__ ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters A : Tuple = reader.get_mention_assignments(snake_case__ , snake_case__ ) A : Dict = reader.get_mention_assignments(snake_case__ , snake_case__ ) A : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = get_coref_infos(snake_case__ , 
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A : Tuple = {} A : int = 0 A : Optional[int] = 0 for name, metric in metrics: A : Dict = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(10 ) , F'Recall: {recall * 100:.2f}' , F' Precision: {precision * 100:.2f}' , F' F1: {fa * 100:.2f}' , ) if conll_subparts_num == 3: A : List[Any] = (conll / 3) * 100 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : str = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: A : Dict = line.split()[5] if not parse_col == "-": A : Tuple = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> int: """simple docstring""" A : Dict = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: A : Any = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" A : Tuple = evaluate( key_lines=SCREAMING_SNAKE_CASE , sys_lines=SCREAMING_SNAKE_CASE , metrics=SCREAMING_SNAKE_CASE , NP_only=SCREAMING_SNAKE_CASE , remove_nested=SCREAMING_SNAKE_CASE , keep_singletons=SCREAMING_SNAKE_CASE , min_span=SCREAMING_SNAKE_CASE , ) return score
362
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Tuple = image_size A : int = patch_size A : Optional[int] = num_channels A : str = is_training A : List[Any] = use_labels A : Any = hidden_size A : Any = num_hidden_layers A : Optional[int] = num_attention_heads A : Any = intermediate_size A : List[str] = hidden_act A : str = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Optional[int] = initializer_range A : Dict = scope A : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[Any] = (image_size // patch_size) ** 2 A : Tuple = num_patches + 2 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Tuple = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Tuple = 
TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) A : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : Optional[int] = 1 A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE ) A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : str = self.type_sequence_label_size A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Optional[Any] = 1 A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A : Tuple = config_and_inputs A : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = TFDeiTModelTester(self ) A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) A : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) A : Dict = self.default_image_processor A : List[str] = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
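The DeiT test sample above ends with an integration check that runs a real checkpoint on a fixture image. The same inference pattern outside the test harness looks roughly like the sketch below, assuming transformers with TensorFlow and Pillow are installed (the checkpoint and fixture path are the ones the test itself uses; weights download on first run):

from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

image = Image.open('tests/fixtures/tests_samples/COCO/000000039769.png')
image_processor = DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')

inputs = image_processor(images=image, return_tensors='tf')
logits = model(**inputs).logits  # expected shape: (1, 1000)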
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
363
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
311
0
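The init file above defers its torch-dependent imports through `_LazyModule`, so importing the package stays cheap until a model class is actually touched. The sketch below re-implements that deferred-import idea from scratch; it is illustrative only and is not the transformers `_LazyModule`:

import importlib
import types


class LazyModule(types.ModuleType):
    # Illustrative re-implementation of the deferred-import idea; not the
    # actual transformers._LazyModule.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        # Import the submodule only now, on first access.
        module = importlib.import_module(f'.{module_name}', self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value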
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vivit-b-16x2-kinetics400': (
        'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = 'vivit'

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu_fast',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
364
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal.
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            # No path was found; fall back to the start position.
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
311
0
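The pathfinder above is greedy best-first rather than A*: the open list is ordered by `f_cost`, which here is just the Manhattan distance to the goal. A quick check of that heuristic in isolation, with values taken from the 7x7 demo grid:

def manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    # Same heuristic as Node.calculate_heuristic above: |dx| + |dy|.
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])


assert manhattan((0, 0), (6, 6)) == 12  # start to goal on the demo grid
assert manhattan((5, 6), (6, 6)) == 1   # one step from the goal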
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path):
    # Log commit info of the current repository.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    # Handle single- and multi-GPU / multi-node initialization.
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f'--- Global rank: {params.global_rank} - '
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank : %i' % params.local_rank)
    logger.info(PREFIX + 'World size : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname : %s' % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://',
            backend='nccl',
        )


def set_seed(args):
    # Set seeds for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
365
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowercase : Any = 'src/transformers' lowercase : str = 'docs/source/en/tasks' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Union[str, Any] = f.readlines() # Find the start prompt. A : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 A : List[str] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowercase : int = direct_transformers_import(TRANSFORMERS_PATH) lowercase : str = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowercase : Optional[int] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = TASK_GUIDE_TO_MODELS[task_guide] A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) A : Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n" def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A, A, A : Optional[int] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) A : Optional[int] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`' ''' to fix this.''' ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : List[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
311
0
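The doc checker above works by locating an auto-generated region between two marker comments and rewriting it in place. A minimal sketch of that marker search, mirroring the `_find_text_in_file` logic (the marker strings and sample lines below are made up for the example):

def find_block(lines: list[str], start_prompt: str, end_prompt: str):
    # Return the text between the two marker lines and its index range.
    start = 0
    while not lines[start].startswith(start_prompt):
        start += 1
    start += 1
    end = start
    while not lines[end].startswith(end_prompt):
        end += 1
    return ''.join(lines[start:end]), start, end


lines = ['intro\n', '<!--start-->\n', 'old generated list\n', '<!--end-->\n']
block, start, end = find_block(lines, '<!--start-->', '<!--end-->')
assert block == 'old generated list\n'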
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    # Checks whether a number is prime in O(sqrt(n)).
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    # Return the first n odd composites that cannot be written as a prime
    # plus twice a square (Goldbach's other conjecture, Project Euler 46).
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f'{solution() = }')
366
def heaps(arr: list) -> list:
    # Heap's algorithm: return all permutations of a list, swapping one pair
    # of elements between consecutive outputs.
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
311
0
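For an n-element input, Heap's algorithm above emits exactly n! permutations, each obtained from the previous one by a single swap. Expected behaviour for a three-element input, using the `heaps` function from the sample:

perms = heaps([1, 2, 3])

assert len(perms) == 6        # 3! outputs
assert len(set(perms)) == 6   # all distinct
assert (1, 2, 3) in perms and (3, 2, 1) in perms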
import re


def split_input(str_: str) -> list:
    # Split on any character that is not alphanumeric or whitespace,
    # then split each fragment into words.
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', str_)]


def to_simple_case(str_: str) -> str:
    # Capitalize every word and join with no separator (Pascal case).
    string_split = split_input(str_)
    return ''.join(
        [''.join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    # Join words with the given separator, in upper or lower case.
    try:
        string_split = split_input(text)
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return 'not valid string'


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return 'not valid string'


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, '_')


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, '-')


if __name__ == "__main__":
    __import__('doctest').testmod()
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
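The scheduler tests above repeatedly round-trip a config through `save_config` / `from_pretrained` and compare step outputs before and after. The round trip itself is short; a minimal sketch, assuming the diffusers package and using config values from the test's defaults:

import tempfile

from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type='bh2')
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)  # writes scheduler_config.json
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdirname)

assert reloaded.config.solver_order == 2
assert reloaded.config.solver_type == 'bh2'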
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
368
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM A : Dict = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ): A : List[Any] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : int = self.scheduler.step( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample A : Dict = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
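For context, this pipeline mirrors diffusers' DDIMPipeline, so a call typically looks like the sketch below. It assumes the public google/ddpm-cifar10-32 checkpoint; eta=0.0 gives fully deterministic DDIM sampling, while eta=1.0 re-introduces DDPM-style stochasticity through the variance term handled in scheduler.step above.

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator(device="cpu").manual_seed(0)
# 50 DDIM steps instead of the 1000 steps the DDPM UNet was trained with
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")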
311
0
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Optional[Any] = seq_length A : List[Any] = is_training A : Optional[Any] = use_input_mask A : Dict = use_token_type_ids A : List[str] = use_labels A : List[Any] = vocab_size A : Optional[Any] = hidden_size A : int = embedding_size A : Union[str, Any] = num_hidden_layers A : Tuple = num_attention_heads A : Optional[int] = intermediate_size A : Union[str, Any] = hidden_act A : List[Any] = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : str = max_position_embeddings A : str = type_vocab_size A : Dict = type_sequence_label_size A : str = initializer_range A : Optional[int] = num_labels A : Tuple = num_choices A : str = scope def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : int = None if self.use_input_mask: A : str = random_attention_mask([self.batch_size, self.seq_length] ) A : Optional[Any] = None if self.use_token_type_ids: A : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Dict = None A : int = None A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) A : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : List[Any] = MegatronBertModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Union[str, Any] = MegatronBertForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : str = MegatronBertForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Tuple = MegatronBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[Any] = MegatronBertForPreTraining(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[str] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , next_sentence_label=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Any = MegatronBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Optional[int] = self.num_labels A : List[Any] = MegatronBertForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Dict = self.num_labels A : Union[str, Any] = MegatronBertForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : List[Any] = self.num_choices A : Optional[int] = MegatronBertForMultipleChoice(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Tuple = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = self.prepare_config_and_inputs() ( A ) : str = config_and_inputs A : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': MegatronBertModel, 
'''fill-mask''': MegatronBertForMaskedLM, '''question-answering''': MegatronBertForQuestionAnswering, '''text-classification''': MegatronBertForSequenceClassification, '''text-generation''': MegatronBertForCausalLM, '''token-classification''': MegatronBertForTokenClassification, '''zero-shot''': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True # test_resize_embeddings = False __magic_name__ = False def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Optional[int]: """simple docstring""" A : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE ): A : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) A : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = MegatronBertModelTester(self ) A : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return torch.tensor( snake_case__ , dtype=torch.long , device=snake_case__ , ) lowercase : Optional[Any] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Tuple = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: A 
: List[Any] = os.path.join(os.environ['''MYDIR'''] , SCREAMING_SNAKE_CASE ) A : Optional[Any] = MegatronBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.half() A : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): A : List[str] = model(SCREAMING_SNAKE_CASE )[0] A : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Any = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3 ): for jj in range(3 ): A : Union[str, Any] = output[0, ii, jj] A : str = expected[3 * ii + jj] A : List[str] = '''ii={} jj={} a={} b={}'''.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertTrue(math.isclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rel_tol=SCREAMING_SNAKE_CASE , abs_tol=SCREAMING_SNAKE_CASE ) , msg=SCREAMING_SNAKE_CASE )
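The integration test above checks a 3x3 slice of the hidden states element-wise rather than with a single tensor comparison. The standalone sketch below replays that tolerance check outside the unittest harness; the output values are made-up stand-ins for model(...)[0][0, :3, :3]:

import math

TOLERANCE = 1e-4
expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
output = [v + 1e-5 for v in expected]  # stand-in for real model activations

for ii in range(3):
    for jj in range(3):
        a, b = output[3 * ii + jj], expected[3 * ii + jj]
        # both relative and absolute tolerance, as in the test above
        assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), f"ii={ii} jj={jj} a={a} b={b}"
print("all close")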
369
'''simple docstring''' from __future__ import annotations from random import random class A : def __init__( self , SCREAMING_SNAKE_CASE = None ) -> Tuple: """simple docstring""" A : Optional[Any] = value A : Any = random() A : Node | None = None A : Node | None = None def __repr__( self ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return F'\'{self.value}: {self.prior:.5}\'' else: return pformat( {F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 ) def __str__( self ) -> str: """simple docstring""" A : Optional[Any] = str(self.value ) + ''' ''' A : Union[str, Any] = str(self.left or '''''' ) A : Any = str(self.right or '''''' ) return value + left + right def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: A, A : Any = split(root.left , snake_case__ ) return left, root else: A, A : Optional[int] = split(root.right , snake_case__ ) return root, right def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: A : List[str] = merge(left.right , snake_case__ ) return left else: A : Tuple = merge(snake_case__ , right.left ) return right def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = Node(snake_case__ ) A, A : Tuple = split(snake_case__ , snake_case__ ) return merge(merge(snake_case__ , snake_case__ ) , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A, A : Dict = split(snake_case__ , value - 1 ) A, A : Any = split(snake_case__ , snake_case__ ) return merge(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not root: # None return else: inorder(root.left ) print(root.value , end=''',''' ) inorder(root.right ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for arg in args.split(): if arg[0] == "+": A : int = insert(snake_case__ , int(arg[1:] ) ) elif arg[0] == "-": A : int = erase(snake_case__ , int(arg[1:] ) ) else: print('''Unknown command''' ) return root def lowerCAmelCase_ ( ): '''simple docstring''' A : Union[str, Any] = None print( '''enter numbers to create a tree, + value to add value into treap, ''' '''- value to erase all nodes with value. \'q\' to quit. ''' ) A : Optional[int] = input() while args != "q": A : str = interact_treap(snake_case__ , snake_case__ ) print(snake_case__ ) A : Union[str, Any] = input() print('''good by!''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
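Assuming the treap functions above are in scope, under the names they are called by inside the file (insert, erase, inorder), a short non-interactive usage example; the random priorities change the tree shape from run to run, but the inorder traversal is always sorted:

root = None
for value in [5, 3, 8, 1, 4]:
    root = insert(root, value)  # split at value, merge the new node in
inorder(root)  # prints: 1,3,4,5,8,
print()
root = erase(root, 3)  # split out every node equal to 3, merge the rest
inorder(root)  # prints: 1,4,5,8,
print()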
311
0
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer A : str = flax_key_tuple[:-1] + ('''weight''',) A : Any = torch.permute(snake_case__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ): # linear layer A : Optional[Any] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if "metadata" in layer: A : List[str] = layer.split('''metadata''' ) A : Union[str, Any] = ''''''.join(split_layer[0] )[:-1] A : Dict = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )] elif "kvstore" in layer: A : Tuple = layer.split('''kvstore''' ) A : List[Any] = ''''''.join(split_layer[0] )[:-1] A : str = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )] else: A : str = layer.split('''/''' ) A : Tuple = '''/'''.join(split_layer[:-1] ) A : str = (split_layer[-1],) if "kvstore/path" in layer: A : Any = F'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: A : Tuple = '''file''' else: A : Dict = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[int] = rename_keys(snake_case__ ) A : Optional[Any] = {} for k, v in current_block.items(): A : Dict = v A : Tuple = new_current_block torch.save(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = WEIGHTS_NAME ): '''simple docstring''' A : Optional[Any] = convert_file_size_to_int(snake_case__ ) A : List[Any] = [] A : List[Any] = {} A : Dict = 0 A : Tuple = 0 os.makedirs(snake_case__ , exist_ok=snake_case__ ) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp: A : str = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target'''] A : int = flatten_dict(snake_case__ , sep='''/''' ) A : Optional[Any] = {} for layer in checkpoint_info.keys(): A : Dict = get_key_and_tensorstore_dict( snake_case__ , snake_case__ , snake_case__ ) if curr_real_layer_name in all_layers: A : Tuple = content else: A : int = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file A : List[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() A : Dict = torch.tensor(snake_case__ ) A : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts A : Union[str, Any] = rename_base_flax_keys(tuple(key.split('''/''' ) ) , snake_case__ ) A : Any = '''/'''.join(snake_case__ ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: A : List[Any] = os.path.join( snake_case__ , weights_name.replace('''.bin''' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) del current_block A : Dict = {} A : Optional[int] = 0 A : str = raw_weights.to(getattr(snake_case__ , snake_case__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block A : Optional[Any] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(snake_case__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index A : int = {} A : str = {} for idx, shard in enumerate(snake_case__ ): A : Optional[int] = weights_name.replace( '''.bin''' , F'-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin' ) # len(sharded_state_dicts):05d} A : Optional[Any] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'-{idx+1:05d}-of-???.bin' ) ) os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) A : Optional[Any] = shard for key in shard: A : Optional[int] = shard_file # Add the metadata A : Optional[Any] = {'''total_size''': total_size} A : Dict = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' ) as f: A : List[str] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) return metadata, index if __name__ == "__main__": lowercase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) lowercase : List[Any] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowerCAmelCase_ ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer A : Dict = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' ) config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' ) A : List[str] = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' ) A : Optional[int] = TaTokenizer.from_pretrained('''t5-small''' ) A : str = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' A : List[Any] = tokenizer(snake_case__ , return_tensors='''pt''' ).input_ids A : List[Any] = model.generate(snake_case__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
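The core sharding decision in the function above (start a new shard once the next tensor would push the current block past max_shard_size) can be shown in isolation. This is a sketch of the policy only; the byte sizes are illustrative, not real checkpoint sizes:

def shard_by_size(sizes, max_shard_size):
    # Greedily pack consecutive weights into blocks of at most max_shard_size bytes.
    blocks, current, current_size = [], [], 0
    for index, size in enumerate(sizes):
        if current and current_size + size > max_shard_size:
            blocks.append(current)
            current, current_size = [], 0
        current.append(index)
        current_size += size
    if current:
        blocks.append(current)  # the "last block" handled explicitly above
    return blocks

print(shard_by_size([4, 4, 4, 4, 4], 10))  # [[0, 1], [2, 3], [4]]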
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
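ResizeShortestEdge above reduces to a small scaling rule: scale so the short side matches the sampled size, rescale again if the long side would exceed max_size, then round to integer pixels. The same arithmetic in pure Python:

def shortest_edge_resize(h, w, size, max_size):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

print(shortest_edge_resize(480, 640, 800, 1333))   # (800, 1067)
print(shortest_edge_resize(400, 1600, 800, 1333))  # (333, 1333), capped by max_size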
311
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = image.size A : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A : Union[str, Any] = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) A : Optional[Any] = np.array(snake_case__ ).astype(np.floataa ) / 255.0 A : int = image[None].transpose(0 , 3 , 1 , 2 ) A : Optional[Any] = torch.from_numpy(snake_case__ ) return 2.0 * image - 1.0 class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__() self.register_modules(vqvae=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ): A : Tuple = 1 elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): A : Optional[int] = image.shape[0] else: raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE )}' ) if isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ): A : List[str] = preprocess(SCREAMING_SNAKE_CASE ) A : Dict = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image A : Dict = (batch_size, self.unet.config.in_channels // 2, height, width) A : Tuple = next(self.unet.parameters() ).dtype A : List[Any] = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = image.to(device=self.device , dtype=SCREAMING_SNAKE_CASE ) # set timesteps and move to the correct device self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=self.device ) A : Dict = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A : Dict = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A : Dict = {} if accepts_eta: A : Union[str, Any] = eta for t in self.progress_bar(SCREAMING_SNAKE_CASE ): # concat latents and low resolution image in the channel dimension. 
A : Optional[Any] = torch.cat([latents, image] , dim=1 ) A : str = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # predict the noise residual A : List[Any] = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # compute the previous noisy sample x_t -> x_t-1 A : Tuple = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample # decode the image latents with the VQVAE A : List[Any] = self.vqvae.decode(SCREAMING_SNAKE_CASE ).sample A : List[Any] = torch.clamp(SCREAMING_SNAKE_CASE , -1.0 , 1.0 ) A : Tuple = image / 2 + 0.5 A : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
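This pipeline matches diffusers' LDMSuperResolutionPipeline, so usage looks roughly like the sketch below. It assumes the CompVis/ldm-super-resolution-4x-openimages checkpoint and feeds a 128x128 low-resolution input, which comes back upscaled 4x through the VQ-VAE decoder:

import requests
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
low_res = Image.open(requests.get(url, stream=True).raw).convert("RGB").resize((128, 128))
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")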
371
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) lowercase : Tuple = parser.parse_args() lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
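A programmatic equivalent of this CLI, under the assumption that download_from_original_stable_diffusion_ckpt keeps the signature implied by the arguments above; the checkpoint path and output directory are placeholders:

import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.ckpt",  # placeholder path
    original_config_file=None,  # inferred from the checkpoint when omitted
    extract_ema=True,           # EMA weights usually give better samples
    scheduler_type="pndm",
)
pipe.to(torch_dtype=torch.float16)  # equivalent of --half
pipe.save_pretrained("converted-sd", safe_serialization=True)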
311
0
'''simple docstring''' import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( '''kwargs, expected''' , [ ({'''num_shards''': 0, '''max_num_jobs''': 1}, []), ({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]), ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]), ({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]), ({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def test_distribute_shards ( kwargs , expected ): '''simple docstring''' out : Union[str, Any] = _distribute_shards(**kwargs ) assert out == expected @pytest.mark.parametrize( '''gen_kwargs, max_num_jobs, expected''' , [ ({'''foo''': 0}, 10, [{'''foo''': 0}]), ({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]), ({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]), ({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]), ({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]), ] , ) def test_split_gen_kwargs ( gen_kwargs , max_num_jobs , expected ): '''simple docstring''' out : Any = _split_gen_kwargs(gen_kwargs , max_num_jobs ) assert out == expected @pytest.mark.parametrize( '''gen_kwargs, expected''' , [ ({'''foo''': 0}, 1), ({'''shards''': [0]}, 1), ({'''shards''': [0, 1, 2, 3]}, 4), ({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4), ({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4), ({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError), ] , ) def test_number_of_shards_in_gen_kwargs ( gen_kwargs , expected ): '''simple docstring''' if expected is RuntimeError: with pytest.raises(expected ): _number_of_shards_in_gen_kwargs(gen_kwargs ) else: out : List[Any] = _number_of_shards_in_gen_kwargs(gen_kwargs ) assert out == expected
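What _distribute_shards is expected to compute, per the parametrized cases above, written out directly. This is a sketch of the contract, not the library's implementation: contiguous shard indices are split over at most max_num_jobs jobs, with the first num_shards % n_jobs jobs taking one extra shard:

def distribute_shards(num_shards, max_num_jobs):
    n_jobs = min(num_shards, max_num_jobs)
    base, extra = divmod(num_shards, n_jobs) if n_jobs else (0, 0)
    out, start = [], 0
    for job in range(n_jobs):
        length = base + (1 if job < extra else 0)
        out.append(range(start, start + length))
        start += length
    return out

print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]
print(distribute_shards(3, 10))  # [range(0, 1), range(1, 2), range(2, 3)]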
350
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = "," __magic_name__ = None __magic_name__ = "infer" __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = "." __magic_name__ = None __magic_name__ = '"' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = "strict" __magic_name__ = "error" __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == 
getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) 
raise
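Hedged usage sketch: this builder backs load_dataset("csv", ...), and pandas.read_csv options such as sep are forwarded through the CsvConfig fields above:

import csv
from datasets import load_dataset

with open("points.csv", "w", newline="") as f:
    writer = csv.writer(f, delimiter=";")
    writer.writerow(["x", "y"])
    writer.writerows([[1, 2], [3, 4]])

ds = load_dataset("csv", data_files="points.csv", sep=";", split="train")
print(ds.column_names, ds[0])  # ['x', 'y'] {'x': 1, 'y': 2}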
311
0
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _TestCommandArgs = namedtuple( '_TestCommandArgs', [ 'dataset', 'name', 'cache_dir', 'data_dir', 'all_configs', 'save_infos', 'ignore_verifications', 'force_redownload', 'clear_cache', ], defaults=[None, None, None, False, False, False, False, False], ) def is_apercent_close ( source , target ): '''simple docstring''' return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def test_test_command ( dataset_loading_script_dir ): '''simple docstring''' args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True ) test_command = TestCommand(*args ) test_command.run() readme_path = os.path.join(dataset_loading_script_dir , '''README.md''' ) assert os.path.exists(readme_path ) dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir ) expected_dataset_infos = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 235_1563, '''num_examples''': 1_0000, }, { '''name''': '''validation''', '''num_bytes''': 23_8418, '''num_examples''': 1000, }, ] , download_size=394_0680 , dataset_size=258_9981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: result, expected = getattr(dataset_infos['''default'''] , key ), getattr(expected_dataset_infos['''default'''] , key ) if key == "num_bytes": assert is_apercent_close(result , expected ) elif key == "splits": assert list(result ) == list(expected ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: assert result == expected
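The 1%-closeness helper above in isolation, with illustrative byte counts:

def is_apercent_close(source, target):
    # allow recorded sizes to drift by less than 1% relative to the target
    return (abs(source - target) / target) < 0.01

print(is_apercent_close(2351563, 2352000))  # True, about 0.02% off
print(is_apercent_close(2351563, 2400000))  # False, about 2% off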
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
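The reduce call that closes the config computes the feature extractor's overall downsampling factor: the product of the convolution strides. Standalone, with the default strides above:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320: one output frame per 320 input samples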
311
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : Optional[Any] = { 'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json', 'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json', } class A ( __snake_case ): __magic_name__ = '''luke''' def __init__( self , SCREAMING_SNAKE_CASE=50267 , SCREAMING_SNAKE_CASE=500000 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : str = vocab_size A : Dict = entity_vocab_size A : Tuple = hidden_size A : str = entity_emb_size A : List[Any] = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Tuple = hidden_act A : Optional[int] = intermediate_size A : Optional[Any] = hidden_dropout_prob A : int = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : List[Any] = type_vocab_size A : Tuple = initializer_range A : Optional[int] = layer_norm_eps A : str = use_entity_aware_attention A : Union[str, Any] = classifier_dropout
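A hedged instantiation sketch for the class above, using its public transformers name LukeConfig; the overrides highlight the entity-specific fields that distinguish it from a plain BERT-style config:

from transformers import LukeConfig

config = LukeConfig(entity_vocab_size=500000, entity_emb_size=256, use_entity_aware_attention=True)
print(config.entity_emb_size, config.use_entity_aware_attention)  # 256 True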
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
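The `qkv` branch of the conversion loop above splits timm's fused attention projection into separate query/key/value tensors by slicing its rows in thirds. A hedged, self-contained illustration of that slicing (the `dim` value here is made up, not taken from any checkpoint):

import torch

dim = 96  # hypothetical all_head_size for one block
qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] rows, timm layout
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)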
311
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase : List[Any] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = ['OwlViTFeatureExtractor'] lowercase : Tuple = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
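The try/except blocks in this row implement the optional-dependency pattern: heavy symbols are only registered in `_import_structure` when the backend is importable. A generic sketch of the same idea, assuming nothing beyond the standard library:

import importlib.util

def backend_available(pkg: str) -> bool:
    # find_spec returns None when the package cannot be imported
    return importlib.util.find_spec(pkg) is not None

_import_structure = {"configuration": ["Config"]}
if backend_available("torch"):
    _import_structure["modeling"] = ["Model"]  # only exposed when torch exists
print(_import_structure)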
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
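The composite config's final method serializes nested sub-configs by deep-copying `__dict__`, replacing each sub-config object with its own `to_dict()` output, and stamping in the `model_type`. A toy reconstruction of that pattern (class and field names here are illustrative, not the real API):

import copy

class SubConfig:
    def __init__(self, n):
        self.n = n

    def to_dict(self):
        return {"n": self.n}

class CompositeConfig:
    model_type = "composite"

    def __init__(self):
        self.text_config = SubConfig(3)
        self.initializer_range = 0.02

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

print(CompositeConfig().to_dict())
# {'text_config': {'n': 3}, 'initializer_range': 0.02, 'model_type': 'composite'}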
311
0
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowercase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') lowercase : Dict = {'target_lang': 'fi', 'source_lang': 'en'} lowercase : int = '>>zh<<' lowercase : Union[str, Any] = 'Helsinki-NLP/' if is_torch_available(): lowercase : str = 'pt' elif is_tf_available(): lowercase : Union[str, Any] = 'tf' else: lowercase : List[str] = 'jax' @require_sentencepiece class A ( __snake_case , unittest.TestCase ): __magic_name__ = MarianTokenizer __magic_name__ = False __magic_name__ = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" super().setUp() A : Union[str, Any] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] A : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : Union[str, Any] = Path(self.tmpdirname ) save_json(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''vocab'''] ) save_json(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''source_spm'''] ) copyfile(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''target_spm'''] ) A : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> MarianTokenizer: """simple docstring""" return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return ( "This is a test", "This is a test", ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = '''</s>''' A : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 9 ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' ) A : str = en_de_tokenizer(['''I am a small frog'''] , return_tensors=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = [38, 121, 14, 697, 38848, 0] self.assertListEqual(SCREAMING_SNAKE_CASE , batch.input_ids[0] ) A : Union[str, Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[Any] = [x.name for x in Path(SCREAMING_SNAKE_CASE ).glob('''*''' )] 
self.assertIn('''source.spm''' , SCREAMING_SNAKE_CASE ) MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[Any] = self.get_tokenizer() A : Any = tok( ['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Dict = self.get_tokenizer() A : List[str] = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[Any] = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' ) A : Tuple = '''Tämä on testi''' A : str = '''This is a test''' A : Tuple = [76, 7, 2047, 2] A : List[Any] = [69, 12, 11, 940, 2] A : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE ).input_ids self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = tokenizer(text_target=SCREAMING_SNAKE_CASE ).input_ids self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
354
'''simple docstring'''
from __future__ import annotations


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : List[str] = 2
    A : Dict = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(snake_case__ )
    if n > 1:
        factors.append(snake_case__ )
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
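The identifier masking above hides the loop variables, so as written the function will not run. A de-obfuscated sketch of the intended trial-division factorization, with a quick check:

def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:  # i does not divide n -> try the next candidate
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:  # whatever is left is itself prime
        factors.append(n)
    return factors

print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]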
311
0
def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    A : Optional[Any] = sum(snake_case__ ) / len(snake_case__ )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(snake_case__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
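As above, the masking breaks the names (`snake_case__` vs. `nums`); the intended computation is the mean absolute deviation. A runnable reconstruction:

def mean_absolute_deviation(nums: list[float]) -> float:
    if not nums:  # makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)
    return sum(abs(x - average) for x in nums) / len(nums)

print(mean_absolute_deviation([2, 4, 6, 8]))  # mean 5; (3 + 1 + 1 + 3) / 4 = 2.0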
355
'''simple docstring'''
# Function to print upper half of diamond (pyramid)


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for i in range(0 , snake_case__ ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for i in range(snake_case__ , 0 , -1 ):
        for _ in range(snake_case__ , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(snake_case__ )  # upper half
    reverse_floyd(snake_case__ )  # lower half


if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    lowercase : List[str] = 1
    while K:
        lowercase : List[Any] = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        lowercase : Any = int(input('press 0 to exit... and 1 to continue...'))

    print('Good Bye...')
311
0
'''simple docstring''' import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging lowercase = logging.get_logger(__name__) lowercase = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n' class A ( __snake_case ): @add_start_docstrings(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" raise NotImplementedError('''StoppingCriteria needs to be subclassed''' ) class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Dict: """simple docstring""" A : Union[str, Any] = max_length A : List[Any] = max_position_embeddings @add_start_docstrings(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" A : Any = input_ids.shape[-1] A : List[Any] = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined ''' F'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe ' '''exceptions, performance degradation, or nothing at all.''' ) return is_done class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" warnings.warn( '''The class `MaxNewTokensCriteria` is deprecated. 
''' F'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ' '''with `max_length = start_length + max_new_tokens` instead.''' , SCREAMING_SNAKE_CASE , ) A : Optional[Any] = start_length A : Optional[int] = max_new_tokens A : Optional[Any] = start_length + max_new_tokens @add_start_docstrings(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return input_ids.shape[-1] >= self.max_length class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[Any]: """simple docstring""" A : List[str] = max_time A : Optional[int] = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return time.time() - self.initial_timestamp > self.max_time class A ( __snake_case ): @add_start_docstrings(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return any(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for criteria in self ) @property def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" for stopping_criterium in self: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return stopping_criterium.max_length elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return stopping_criterium.max_length return None def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Any = stopping_criteria.max_length A : Dict = deepcopy(snake_case__ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , snake_case__ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=snake_case__ ) ) return new_stopping_criteria
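The criteria classes in this row all share one contract: `__call__(input_ids, scores) -> bool`, with the list container OR-ing its members via `any(...)`. A minimal standalone sketch of the max-length criterion, re-implemented locally so no particular `transformers` version is assumed:

import torch

class MaxLength:
    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids: torch.Tensor, scores=None) -> bool:
        # stop once the generated sequence reaches max_length tokens
        return input_ids.shape[-1] >= self.max_length

criterion = MaxLength(8)
print(criterion(torch.ones(1, 5, dtype=torch.long)))  # False: keep generating
print(criterion(torch.ones(1, 8, dtype=torch.long)))  # True: stop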
356
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class A ( __snake_case ):
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        A : List[Any] = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,
            generator=SCREAMING_SNAKE_CASE , )
        A : Optional[Any] = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
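Stripped of the library types, the `__call__` above is the standard reverse-diffusion loop: start from Gaussian noise, repeatedly predict the noise and step the scheduler backwards, then rescale to [0, 1]. A schematic version with stand-in model and scheduler objects (the update rule is a placeholder; the real objects come from `diffusers`):

import torch

class ToyScheduler:
    def set_timesteps(self, num_steps: int):
        self.timesteps = list(range(num_steps - 1, -1, -1))

    def step(self, noise_pred, t, sample):
        return sample - 0.01 * noise_pred  # placeholder update rule

unet = lambda sample, t: torch.zeros_like(sample)  # stand-in noise predictor
scheduler = ToyScheduler()

image = torch.randn(1, 3, 8, 8)
scheduler.set_timesteps(10)
for t in scheduler.timesteps:
    noise_pred = unet(image, t)
    image = scheduler.step(noise_pred, t, image)
image = (image / 2 + 0.5).clamp(0, 1)
print(image.shape)  # torch.Size([1, 3, 8, 8])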
311
0
'''simple docstring'''
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class A ( unittest.TestCase ):
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        debug_launcher(test_script.main )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        debug_launcher(test_ops.main )
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A, A, A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , 
unittest.TestCase ): __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , 
SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring'''
from torch import nn


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'Unsupported activation function: {act_fn}' )
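De-masked, the helper is a plain string-to-module factory. A runnable version with the same mapping:

from torch import nn

def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f"Unsupported activation function: {act_fn}")

print(get_activation("silu"))  # SiLU()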
358
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
311
0
'''simple docstring''' from jiwer import compute_measures import datasets lowercase : Optional[int] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' lowercase : Any = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' lowercase : List[Any] = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False ) -> List[str]: """simple docstring""" if concatenate_texts: return compute_measures(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )["wer"] else: A : Tuple = 0 A : Optional[int] = 0 for prediction, reference in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Optional[int] = 
compute_measures(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
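The metric's docstring defines WER = (S + D + I) / (S + D + C), and the iterative branch above accumulates exactly those counts across segment pairs. A worked example on made-up counts, just to pin the arithmetic down:

substitutions, deletions, insertions, hits = 1, 1, 0, 2
errors = substitutions + deletions + insertions      # S + D + I
reference_words = substitutions + deletions + hits   # S + D + C = N
print(errors / reference_words)  # 0.5, the same score as the docstring example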
359
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCAmelCase_ ( snake_case__ = "laptop" ): '''simple docstring''' A : Tuple = F'https://www.amazon.in/laptop/s?k={product}' A : Optional[int] = { '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''', '''Accept-Language''': '''en-US, en;q=0.5''', } A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text ) # Initialize a Pandas dataframe with the column titles A : List[str] = DataFrame( columns=[ '''Product Title''', '''Product Link''', '''Current Price of the product''', '''Product Rating''', '''MRP of the product''', '''Discount''', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ): try: A : Optional[Any] = item.ha.text A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href'''] A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text try: A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text except AttributeError: A : Optional[int] = '''Not available''' try: A : str = ( '''₹''' + item.find( '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1] ) except AttributeError: A : List[Any] = '''''' try: A : Dict = float( ( ( float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) ) ) / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) ) * 100 ) except ValueError: A : str = float('''nan''' ) except AttributeError: pass A : Union[str, Any] = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A : List[str] = ''' ''' A : Optional[Any] = ''' ''' data_frame.index += 1 return data_frame if __name__ == "__main__": lowercase : Union[str, Any] = 'headphones' get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
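The scraper's discount field is the relative markdown between MRP and current price, expressed as a percentage. On sample numbers (assumed, not scraped):

mrp, price = 50000.0, 42500.0
discount = (mrp - price) / mrp * 100
print(discount)  # 15.0 (% off MRP)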
311
0
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A : Tuple = size, scale * w else: A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge A : 
Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
360
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """Normalized escape time of c = x + yi under z -> z**2 + c (1.0 = never escapes)."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
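# Editor's sketch (not part of the original file): two boundary cases of
# get_distance(). c = 0 never escapes, so the full step budget is used and the
# value is 1.0; c = 5 + 5i has |c| > 2 and escapes on the very first step.
assert get_distance(0, 0, 50) == 1.0
assert get_distance(5, 5, 50) == 0.0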
311
0
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Optional[Any] = 3 A : List[Any] = 250 A : Tuple = ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE ) A : Tuple = torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE , dtype=torch.float ) / length return input_ids, scores def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Any = self._get_tensors(5 ) A : Optional[Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : Optional[int] = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : List[Any] = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : str = MaxLengthCriteria(max_length=10 ) A : int = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : List[Any] = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : Any = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : int = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) A : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : Any = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : int = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = self._get_tensors(5 ) A : List[Any] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(SCREAMING_SNAKE_CASE ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) A : int = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1 )
361
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
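# Editor's invocation sketch; the script path is an assumption inferred from the
# argparse usage above, not something this file states:
#   python utils/check_build.py              # check build/lib/transformers
#   python utils/check_build.py --check_lib  # check the installed package instead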
311
0
'''simple docstring'''
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the candidate plaintext for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
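# Editor's usage sketch with a hypothetical ciphertext (not part of the original
# file): "KHOOR" is "HELLO" Caesar-shifted by 3, so among the 26 printed
# candidates the key #3 line should read "Decryption using Key #3: HELLO".
decrypt("KHOOR")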
362
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Tuple = image_size A : int = patch_size A : Optional[int] = num_channels A : str = is_training A : List[Any] = use_labels A : Any = hidden_size A : Any = num_hidden_layers A : Optional[int] = num_attention_heads A : Any = intermediate_size A : List[str] = hidden_act A : str = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Optional[int] = initializer_range A : Dict = scope A : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[Any] = (image_size // patch_size) ** 2 A : Tuple = num_patches + 2 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Tuple = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Tuple = 
TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) A : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : Optional[int] = 1 A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE ) A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : str = self.type_sequence_label_size A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Optional[Any] = 1 A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A : Tuple = config_and_inputs A : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = TFDeiTModelTester(self ) A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) A : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) A : Dict = self.default_image_processor A : List[str] = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring'''
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
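# Editor's sketch using the classic {"he", "she", "his", "hers"} dictionary (the
# input string is illustrative): each value lists the 0-based start indices of
# the matches found in a single left-to-right pass over "ahishers".
matches = Automaton(["he", "she", "his", "hers"]).search_in("ahishers")
assert matches == {"his": [1], "she": [3], "he": [4], "hers": [4]}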
363
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
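# Editor's usage sketch (the package path is an assumption based on the module
# layout implied above): with the lazy module in place, heavy submodules are
# imported on first attribute access rather than at package import time, e.g.
#
#   from transformers.models.cpmant import CpmAntConfig  # triggers the real import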
311
0
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
364
'''simple docstring''' from __future__ import annotations lowercase : Union[str, Any] = list[tuple[int, int]] lowercase : Optional[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" A : int = pos_x A : Optional[Any] = pos_y A : Optional[Any] = (pos_y, pos_x) A : str = goal_x A : Optional[int] = goal_y A : List[Any] = g_cost A : str = parent A : str = self.calculate_heuristic() def __lowerCAmelCase ( self ) -> float: """simple docstring""" A : Optional[int] = abs(self.pos_x - self.goal_x ) A : Optional[Any] = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE ) A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE ) A : Optional[Any] = [self.start] A : list[Node] = [] A : Tuple = False def __lowerCAmelCase ( self ) -> Path | None: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: A : Optional[int] = True return self.retrace_path(SCREAMING_SNAKE_CASE ) self.closed_nodes.append(SCREAMING_SNAKE_CASE ) A : Any = self.get_successors(SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: # retrieve the best current path A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]: """simple docstring""" A : List[Any] = [] for action in delta: A : List[str] = parent.pos_x + action[1] A : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path: """simple docstring""" A : int = node A : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowercase : Tuple = (0, 0) lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') lowercase : int = GreedyBestFirst(init, goal) lowercase : Union[str, Any] = greedy_bf.search() if path: for pos_x, pos_y in path: lowercase : Dict = 2 for elem in grid: print(elem)
311
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ): '''simple docstring''' A : Optional[int] = {} if train_file is not None: A : int = [train_file] if eval_file is not None: A : List[str] = [eval_file] if test_file is not None: A : Optional[Any] = [test_file] A : Union[str, Any] = datasets.load_dataset('''csv''' , data_files=snake_case__ ) A : int = list(ds[list(files.keys() )[0]].features.keys() ) A : Optional[int] = features_name.pop(snake_case__ ) A : List[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) ) A : Dict = {label: i for i, label in enumerate(snake_case__ )} A : List[str] = tokenizer.model_input_names A : List[Any] = {} if len(snake_case__ ) == 1: for k in files.keys(): A : Any = ds[k].map( lambda snake_case__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding='''max_length''' ) , batched=snake_case__ , ) elif len(snake_case__ ) == 2: for k in files.keys(): A : Tuple = ds[k].map( lambda snake_case__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding='''max_length''' , ) , batched=snake_case__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: A : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names} A : Dict = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: A : List[str] = {k: v for k, v in ex.items() if k in input_names} A : Optional[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: A : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names} A : Tuple = labelaid[ex[label_name]] yield (d, label) A : int = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: A : int = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) A : Union[str, Any] = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: A : str = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) A : Union[str, Any] = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: A : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowercase : 
List[str] = logging.getLogger(__name__) @dataclass class A : __magic_name__ = field(metadata={'''help''': '''Which column contains the label'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''The path of the training file'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''The path of the development file'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''The path of the test file'''} ) __magic_name__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : __magic_name__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) A : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' F'16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A : List[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) A : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): A : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , ) def compute_metrics(snake_case__ ) -> Dict: A : Optional[int] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer A : Optional[int] = TFTrainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A : Any = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A : int = trainer.evaluate() A : str = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(snake_case__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) results.update(snake_case__ ) return results if __name__ == "__main__": main()
365
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowercase : Any = 'src/transformers' lowercase : str = 'docs/source/en/tasks' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Union[str, Any] = f.readlines() # Find the start prompt. A : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 A : List[str] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowercase : int = direct_transformers_import(TRANSFORMERS_PATH) lowercase : str = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowercase : Optional[int] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = TASK_GUIDE_TO_MODELS[task_guide] A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) A : Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n" def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A, A, A : Optional[int] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) A : Optional[int] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`' ''' to fix this.''' ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : List[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
311
0
'''simple docstring'''
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for a location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
366
'''simple docstring'''


def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` as tuples, generated in place by
    Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
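# Editor's sketch: for three elements Heap's algorithm must emit all 3! = 6
# permutations exactly once (its emission order differs from itertools, so we
# compare after sorting).
assert sorted(heaps([1, 2, 3])) == [
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)
]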
311
0
'''simple docstring'''
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search: try every 3-permutation until one sums to target."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """O(n^2) search: sort once, then scan each prefix element with two pointers."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
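# Editor's deterministic spot check (make_dataset() is random, so a fixed input
# is useful): 5 + 7 + 23 = 35 is the only qualifying triplet here, and both
# implementations should agree on it.
assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)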
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
'''simple docstring'''


def solution(power: int = 1000) -> int:
    """Sum the decimal digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
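# Editor's worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26; 1366 is
# the widely published answer to Project Euler problem 16 for the default power.
assert solution(15) == 26
assert solution(1000) == 1366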
368
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class A ( __snake_case ):
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        A : Dict = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ):
            A : List[Any] = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            A : int = self.scheduler.step(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample

        A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
311
0
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A ( __snake_case , unittest.TestCase ): __magic_name__ = AudioLDMPipeline __magic_name__ = TEXT_TO_AUDIO_PARAMS __magic_name__ = TEXT_TO_AUDIO_BATCH_PARAMS __magic_name__ = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" torch.manual_seed(0 ) A : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=SCREAMING_SNAKE_CASE , ) A : str = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) A : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) A : List[Any] = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) A : List[str] = ClapTextModelWithProjection(SCREAMING_SNAKE_CASE ) A : int = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) A : int = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=SCREAMING_SNAKE_CASE , ) A : List[str] = SpeechTaHifiGan(SCREAMING_SNAKE_CASE ) A : List[str] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ): A : Any = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: A : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) A : Dict = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = '''cpu''' # 
ensure determinism for the device-dependent torch.Generator A : Union[str, Any] = self.get_dummy_components() A : str = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : Any = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : int = audioldm_pipe(**SCREAMING_SNAKE_CASE ) A : Any = output.audios[0] assert audio.ndim == 1 assert len(SCREAMING_SNAKE_CASE ) == 256 A : Optional[Any] = audio[:10] A : int = np.array( [-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Union[str, Any] = self.get_dummy_components() A : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : Dict = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) A : Tuple = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : Optional[int] = 3 * [inputs['''prompt''']] # forward A : Dict = audioldm_pipe(**SCREAMING_SNAKE_CASE ) A : List[str] = output.audios[0] A : Dict = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : Optional[Any] = 3 * [inputs.pop('''prompt''' )] A : Any = audioldm_pipe.tokenizer( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) A : List[Any] = text_inputs['''input_ids'''].to(SCREAMING_SNAKE_CASE ) A : str = audioldm_pipe.text_encoder( SCREAMING_SNAKE_CASE , ) A : Any = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE , dim=-1 ) A : Optional[int] = prompt_embeds # forward A : Tuple = audioldm_pipe(**SCREAMING_SNAKE_CASE ) A : Tuple = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[Any] = self.get_dummy_components() A : Optional[int] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : Dict = 3 * ['''this is a negative prompt'''] A : Dict = negative_prompt A : str = 3 * [inputs['''prompt''']] # forward A : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE ) A : str = output.audios[0] A : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : List[Any] = 3 * [inputs.pop('''prompt''' )] A : str = [] for p in [prompt, negative_prompt]: A : Union[str, Any] = audioldm_pipe.tokenizer( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) A : Any = text_inputs['''input_ids'''].to(SCREAMING_SNAKE_CASE ) A : Any = audioldm_pipe.text_encoder( SCREAMING_SNAKE_CASE , ) A : Any = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A : Any = F.normalize(SCREAMING_SNAKE_CASE , dim=-1 ) embeds.append(SCREAMING_SNAKE_CASE ) A : Tuple = embeds # forward A : Optional[int] = audioldm_pipe(**SCREAMING_SNAKE_CASE ) A : List[str] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Dict = '''cpu''' # ensure 
determinism for the device-dependent torch.Generator A : Union[str, Any] = self.get_dummy_components() A : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE ) A : Optional[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : Optional[int] = '''egg cracking''' A : Any = audioldm_pipe(**SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE ) A : str = output.audios[0] assert audio.ndim == 1 assert len(SCREAMING_SNAKE_CASE ) == 256 A : Dict = audio[:10] A : Dict = np.array( [-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : Optional[int] = self.get_dummy_components() A : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE ) A : Optional[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : Tuple = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Optional[Any] = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) A : Any = audioldm_pipe(SCREAMING_SNAKE_CASE , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A : Dict = 2 A : Any = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A : int = 2 A : List[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A : Optional[int] = 2 A : int = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : List[str] = self.get_dummy_components() A : str = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : List[str] = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Any = audioldm_pipe.vocoder.config.sampling_rate A : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) A : Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 , **SCREAMING_SNAKE_CASE ) A : Tuple = output.audios[0] assert audio.ndim == 1 assert len(SCREAMING_SNAKE_CASE ) / vocoder_sampling_rate == 0.016 A : List[Any] = audioldm_pipe(audio_length_in_s=0.032 , **SCREAMING_SNAKE_CASE ) A : int = output.audios[0] assert audio.ndim == 1 assert len(SCREAMING_SNAKE_CASE ) / vocoder_sampling_rate == 0.032 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = self.get_dummy_components() A : List[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE ) A : Optional[int] = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Optional[Any] = ['''hey'''] A : str = audioldm_pipe(SCREAMING_SNAKE_CASE , num_inference_steps=1 ) A : List[Any] = output.audios.shape assert 
audio_shape == (1, 256) A : Dict = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A : Union[str, Any] = SpeechTaHifiGan(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) A : str = audioldm_pipe(SCREAMING_SNAKE_CASE , num_inference_steps=1 ) A : int = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self._test_inference_batch_single_identical(test_mean_pixel_difference=SCREAMING_SNAKE_CASE ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE ) @slow class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" , SCREAMING_SNAKE_CASE=torch.floataa , SCREAMING_SNAKE_CASE=0 ) -> Tuple: """simple docstring""" A : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) A : Optional[Any] = np.random.RandomState(SCREAMING_SNAKE_CASE ).standard_normal((1, 8, 128, 16) ) A : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ) A : Dict = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) A : int = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = self.get_inputs(SCREAMING_SNAKE_CASE ) A : Any = 25 A : int = audioldm_pipe(**SCREAMING_SNAKE_CASE ).audios[0] assert audio.ndim == 1 assert len(SCREAMING_SNAKE_CASE ) == 81920 A : int = audio[77230:77240] A : str = np.array( [-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] ) A : List[str] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : str = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) A : List[str] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A : List[str] = audioldm_pipe.to(SCREAMING_SNAKE_CASE ) audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : List[str] = self.get_inputs(SCREAMING_SNAKE_CASE ) A : Dict = audioldm_pipe(**SCREAMING_SNAKE_CASE ).audios[0] assert audio.ndim == 1 assert len(SCREAMING_SNAKE_CASE ) == 81920 A : Optional[int] = audio[27780:27790] A : Tuple = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] ) A : Union[str, Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
369
'''simple docstring'''
from __future__ import annotations

from random import random


class A :
    def __init__( self , SCREAMING_SNAKE_CASE = None ) -> Tuple:
        """simple docstring"""
        A : Optional[Any] = value
        A : Any = random()
        A : Node | None = None
        A : Node | None = None

    def __repr__( self ) -> str:
        """simple docstring"""
        from pprint import pformat

        if self.left is None and self.right is None:
            return F'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )

    def __str__( self ) -> str:
        """simple docstring"""
        A : Optional[Any] = str(self.value ) + ''' '''
        A : Union[str, Any] = str(self.left or '''''' )
        A : Any = str(self.right or '''''' )
        return value + left + right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            A, A : Any = split(root.left , snake_case__ )
            return left, root
        else:
            A, A : Optional[int] = split(root.right , snake_case__ )
            return root, right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        A : List[str] = merge(left.right , snake_case__ )
        return left
    else:
        A : Tuple = merge(snake_case__ , right.left )
        return right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : List[Any] = Node(snake_case__ )
    A, A : Tuple = split(snake_case__ , snake_case__ )
    return merge(merge(snake_case__ , snake_case__ ) , snake_case__ )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A, A : Dict = split(snake_case__ , value - 1 )
    A, A : Any = split(snake_case__ , snake_case__ )
    return merge(snake_case__ , snake_case__ )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            A : int = insert(snake_case__ , int(arg[1:] ) )
        elif arg[0] == "-":
            A : int = erase(snake_case__ , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Union[str, Any] = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )

    A : Optional[int] = input()
    while args != "q":
        A : str = interact_treap(snake_case__ , snake_case__ )
        print(snake_case__ )
        A : Union[str, Any] = input()

    print('''good by!''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
311
0
'''simple docstring'''
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
lowercase : Union[str, Any] = 0

lowercase : str = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowercase : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

lowercase : List[str] = tuple[int, int]


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> None:
        """simple docstring"""
        A : Union[str, Any] = pos_x
        A : List[str] = pos_y
        A : int = (pos_y, pos_x)
        A : Optional[Any] = goal_x
        A : Any = goal_y
        A : int = g_cost
        A : List[str] = parent
        A : List[Any] = self.calculate_heuristic()
        A : Any = self.g_cost + self.h_cost

    def __lowerCAmelCase ( self ) -> float:
        """simple docstring"""
        A : Optional[Any] = self.pos_x - self.goal_x
        A : Dict = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(SCREAMING_SNAKE_CASE ) + abs(SCREAMING_SNAKE_CASE )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
        A : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )

        A : Tuple = [self.start]
        A : list[Node] = []

        A : Union[str, Any] = False

    def __lowerCAmelCase ( self ) -> list[TPosition]:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            A : List[str] = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                return self.retrace_path(SCREAMING_SNAKE_CASE )

            self.closed_nodes.append(SCREAMING_SNAKE_CASE )
            A : Optional[int] = self.get_successors(SCREAMING_SNAKE_CASE )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(SCREAMING_SNAKE_CASE )
                else:
                    # retrieve the best current path
                    A : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )
                    else:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )

        return [self.start.pos]

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
        """simple docstring"""
        A : str = []
        for action in delta:
            A : Any = parent.pos_x + action[1]
            A : str = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    SCREAMING_SNAKE_CASE ,
                    SCREAMING_SNAKE_CASE ,
                    self.target.pos_y ,
                    self.target.pos_x ,
                    parent.g_cost + 1 ,
                    SCREAMING_SNAKE_CASE ,
                ) )
        return successors

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[TPosition]:
        """simple docstring"""
        A : str = node
        A : str = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            A : Dict = current_node.parent
        path.reverse()
        return path


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
        """simple docstring"""
        A : int = AStar(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        A : Dict = AStar(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        A : List[Any] = False

    def __lowerCAmelCase ( self ) -> list[TPosition]:
        """simple docstring"""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            A : Dict = self.fwd_astar.open_nodes.pop(0 )
            A : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

            self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE )
            self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE )

            A : Optional[Any] = current_bwd_node
            A : int = current_fwd_node
            A : Optional[int] = {
                self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE ),
                self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE ),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(SCREAMING_SNAKE_CASE )
                    else:
                        # retrieve the best current path
                        A : str = astar.open_nodes.pop(
                            astar.open_nodes.index(SCREAMING_SNAKE_CASE ) )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(SCREAMING_SNAKE_CASE )
                        else:
                            astar.open_nodes.append(SCREAMING_SNAKE_CASE )

        return [self.fwd_astar.start.pos]

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[TPosition]:
        """simple docstring"""
        A : Tuple = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE )
        A : int = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE )
        bwd_path.pop()
        bwd_path.reverse()
        A : Union[str, Any] = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    lowercase : Union[str, Any] = (0, 0)
    lowercase : int = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    lowercase : str = time.time()
    lowercase : int = AStar(init, goal)
    lowercase : int = a_star.search()
    lowercase : str = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    lowercase : List[Any] = time.time()
    lowercase : str = BidirectionalAStar(init, goal)
    lowercase : Optional[int] = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
311
0
lowercase : Tuple = '0.21.0'

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
371
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) lowercase : Tuple = parser.parse_args() lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
311
0
'''simple docstring'''
import jax.numpy as jnp

from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig


lowercase : Tuple = logging.get_logger(__name__)

lowercase : Dict = 'T5Config'


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : List[Any] = jnp.zeros_like(snake_case__ )
    A : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    A : Optional[Any] = shifted_input_ids.at[:, 0].set(snake_case__ )

    A : Union[str, Any] = jnp.where(shifted_input_ids == -100 , snake_case__ , snake_case__ )
    return shifted_input_ids


class A ( __snake_case ):
    __magic_name__ = '''mt5'''
    __magic_name__ = MTaConfig


class A ( __snake_case ):
    __magic_name__ = '''mt5'''
    __magic_name__ = MTaConfig


class A ( __snake_case ):
    __magic_name__ = '''mt5'''
    __magic_name__ = MTaConfig
350
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = "," __magic_name__ = None __magic_name__ = "infer" __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = "." __magic_name__ = None __magic_name__ = '"' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = "strict" __magic_name__ = "error" __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == 
getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) 
raise
311
0
'''simple docstring'''
import math


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Optional[Any] = 0
    A : Union[str, Any] = 0
    while num > 0:
        A : int = num % 8
        A : Optional[Any] = octal + (remainder * math.floor(math.pow(10 , snake_case__ ) ))
        counter += 1
        A : Any = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'0o{int(snake_case__ )}'


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    print('''\n2 in octal is:''' )
    print(decimal_to_octal(2 ) )  # = 2
    print('''\n8 in octal is:''' )
    print(decimal_to_octal(8 ) )  # = 10
    print('''\n65 in octal is:''' )
    print(decimal_to_octal(65 ) )  # = 101
    print('''\n216 in octal is:''' )
    print(decimal_to_octal(216 ) )  # = 330
    print('''\n512 in octal is:''' )
    print(decimal_to_octal(512 ) )  # = 1000
    print('''\n''' )


if __name__ == "__main__":
    main()
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
311
0
'''simple docstring'''
from collections import defaultdict


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : List[str] = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        A : int = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(SCREAMING_SNAKE_CASE ) )
        ]

        A : Optional[int] = defaultdict(SCREAMING_SNAKE_CASE )  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        A : Dict = (1 << len(SCREAMING_SNAKE_CASE )) - 1

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't this task in the arrangement
        A : Any = self.count_ways_until(SCREAMING_SNAKE_CASE , task_no + 1 )

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )

        # save the value.
        A : Optional[Any] = total_ways_util

        return self.dp[mask][task_no]

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """simple docstring"""
        for i in range(len(SCREAMING_SNAKE_CASE ) ):
            for j in task_performed[i]:
                self.task[j].append(SCREAMING_SNAKE_CASE )

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )


if __name__ == "__main__":
    lowercase : Optional[Any] = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    lowercase : Union[str, Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
311
0
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
lowercase : Optional[int] = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowercase : str = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    lowercase : Optional[Any] = parser.parse_args()
    if args.check_lib:
        lowercase : List[Any] = importlib.import_module('transformers')
        lowercase : str = Path(transformers_module.__file__).parent
    else:
        lowercase : List[Any] = Path.cwd() / 'build/lib/transformers'

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
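# Hedged usage sketch (not part of the file above): the three obfuscated config
# classes mirror the public `transformers` Pix2Struct API, where the composite
# config is assembled from its two sub-configs via `from_text_vision_configs`.
# Hyper-parameter values below are illustrative.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_kv=32, d_ff=128)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers, config.vision_config.num_hidden_layers)  # 2 2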
311
0
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = tmp_path / '''cache''' A : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A : List[str] = SqlDatasetReader( '''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_sql_dataset(snake_case__ , snake_case__ ) @require_sqlalchemy @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = tmp_path / '''cache''' A : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} A : Optional[Any] = features.copy() if features else default_expected_features A : List[str] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) A : Optional[Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=snake_case__ , cache_dir=snake_case__ ).read() _check_sql_dataset(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' with contextlib.closing(sqlitea.connect(snake_case__ ) ) as con: A : Tuple = con.cursor() cur.execute('''SELECT * FROM dataset''' ) for row in cur: yield row @require_sqlalchemy def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[str] = tmp_path / '''cache''' A : str = os.path.join(snake_case__ , '''tmp.sql''' ) A : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read() SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write() A : int = iter_sql_file(snake_case__ ) A : List[str] = iter_sql_file(snake_case__ ) for rowa, rowa in zip(snake_case__ , snake_case__ ): assert rowa == rowa @require_sqlalchemy def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : str = tmp_path / '''cache''' A : Dict = os.path.join(snake_case__ , '''tmp.sql''' ) A : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read() SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write() A : str = 
iter_sql_file(snake_case__ ) A : Dict = iter_sql_file(snake_case__ ) for rowa, rowa in zip(snake_case__ , snake_case__ ): assert rowa == rowa @require_sqlalchemy def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = tmp_path / '''cache''' A : Optional[int] = os.path.join(snake_case__ , '''tmp.sql''' ) A : int = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read() with pytest.raises(snake_case__ ): SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
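# Hedged round-trip sketch of the reader/writer exercised by the tests above, using
# the public `datasets` entry points (`Dataset.to_sql` / `Dataset.from_sql`); the
# table name and SQLite path are illustrative.
from datasets import Dataset

ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [0.1, 0.2]})
ds.to_sql('dataset', 'sqlite:///tmp.sql')  # write the rows into a SQLite table
round_tripped = Dataset.from_sql('dataset', 'sqlite:///tmp.sql')  # read them back
assert round_tripped.column_names == ds.column_names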
354
'''simple docstring'''
from __future__ import annotations


def lowerCAmelCase_(snake_case__):
    '''simple docstring'''
    n = snake_case__
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
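# Quick sanity checks for the trial-division factorisation above (values verifiable
# by hand; the product of each result equals the input).
assert lowerCAmelCase_(8) == [2, 2, 2]
assert lowerCAmelCase_(100) == [2, 2, 5, 5]
assert lowerCAmelCase_(97) == [97]  # a prime factors as itself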
311
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: lowercase : int = None lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : int = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } lowercase : Optional[int] = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } lowercase : List[str] = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = BigBirdTokenizer __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = [] def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE="[CLS]" , **SCREAMING_SNAKE_CASE , ) -> Dict: """simple docstring""" A : Dict = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else bos_token A : Any = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else eos_token A : int = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else unk_token A : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else pad_token A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cls_token A : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A : Any = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : str = vocab_file A : str = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Optional[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Any = [self.sep_token_id] A : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
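# Illustration of the special-token layouts produced by the sequence-building
# helpers above (the obfuscated `__lowerCAmelCase` methods); token ids are
# placeholder integers, not real BigBird vocabulary ids.
cls_id, sep_id = 65, 66
single = [cls_id] + [10, 11, 12] + [sep_id]  # [CLS] A [SEP]
pair = [cls_id] + [10, 11] + [sep_id] + [20, 21] + [sep_id]  # [CLS] A [SEP] B [SEP]
segment_ids = [0] * (2 + 2) + [1] * (2 + 1)  # matches the token-type-id helper for the pair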
355
'''simple docstring'''
# Function to print upper half of diamond (pyramid)


def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r'| /\ | |- | |- |--| |\ /| |-')
    print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
311
0
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase = 16 lowercase = 32 def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return int(x / 2**20 ) class A : def __enter__( self ) -> Optional[int]: """simple docstring""" gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero A : Optional[int] = torch.cuda.memory_allocated() return self def __exit__( self , *SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" gc.collect() torch.cuda.empty_cache() A : Optional[int] = torch.cuda.memory_allocated() A : List[str] = torch.cuda.max_memory_allocated() A : Union[str, Any] = bamb(self.end - self.begin ) A : Union[str, Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCAmelCase_ ( snake_case__ , snake_case__ = 16 , snake_case__ = "bert-base-cased" , snake_case__ = 320 , snake_case__ = 160 , ): '''simple docstring''' A : str = AutoTokenizer.from_pretrained(snake_case__ ) A : Union[str, Any] = load_dataset( '''glue''' , '''mrpc''' , split={'''train''': F'train[:{n_train}]', '''validation''': F'validation[:{n_val}]'} ) def tokenize_function(snake_case__ ): # max_length=None => use the model max length (it's actually the default) A : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A : Any = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A : Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
A : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A : Tuple = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A : List[str] = config['''lr'''] A : List[Any] = int(config['''num_epochs'''] ) A : Optional[Any] = int(config['''seed'''] ) A : List[str] = int(config['''batch_size'''] ) A : Optional[int] = args.model_name_or_path set_seed(snake_case__ ) A : str = get_dataloaders(snake_case__ , snake_case__ , snake_case__ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A : Optional[int] = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A : Tuple = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: A : Optional[int] = 1 A : str = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A : Optional[int] = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A : str = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
A : Union[str, Any] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A : Any = 0 # We also need to keep track of the stating epoch so files are named properly A : Tuple = 0 # Now we train the model A : Optional[int] = {} for epoch in range(snake_case__ , snake_case__ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(snake_case__ ): A : int = model(**snake_case__ ) A : List[str] = outputs.loss A : int = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) ) accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) ) accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) ) accelerator.print( '''Total Peak Memory consumed during the train (max): {}'''.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) A : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f: json.dump(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Tuple = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case__ , ) parser.add_argument( '''--output_dir''' , type=snake_case__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--peak_memory_upper_bound''' , type=snake_case__ , default=snake_case__ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , ) parser.add_argument( '''--n_train''' , type=snake_case__ , default=320 , help='''Number of training examples to use.''' , ) parser.add_argument( '''--n_val''' , type=snake_case__ , default=160 , help='''Number of validation examples to use.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case__ , default=1 , help='''Number of train epochs.''' , ) A : Any = parser.parse_args() A : Tuple = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
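# Hedged usage sketch for the GPU-memory tracker above (defined as class `A` by the
# obfuscation, but instantiated as `TorchTracemalloc` in the training loop). The
# workload below is a stand-in allocation, not part of the original script; CUDA is
# required.
import torch

with TorchTracemalloc() as tracemalloc:
    buffers = [torch.zeros(1024, 1024, device='cuda') for _ in range(16)]  # ~64 MB of fp32
print(f"delta used/peak {tracemalloc.used}/{tracemalloc.peaked} MB")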
356
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class A(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
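# Hedged usage sketch for the custom test pipeline above (the class is obfuscated
# as `A`; the checkpoint name is illustrative). A plain DDPM checkpoint supplies the
# `unet` and `scheduler` the constructor registers:
#
#     pipe = A.from_pretrained('google/ddpm-cifar10-32')
#     output, message = pipe(batch_size=1, num_inference_steps=50)
#     assert message == "This is a local test"
#     output.images[0].save('sample.png')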
311
0
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def lowerCAmelCase_(ode_func: Callable, ya: float, xa: float, x_end: float, step_size: float) -> np.ndarray:
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
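# Sanity check for the explicit (forward) Euler integrator above: for y' = y with
# y(0) = 1 on [0, 1], the numerical endpoint should approach e ≈ 2.71828 as the
# step size shrinks.
ys = lowerCAmelCase_(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)
print(ys[-1])  # ≈ 2.7169, slightly below e, as expected for explicit Euler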
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A, A, A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , 
unittest.TestCase ): __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , 
SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
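# To run just this test module (hedged; the path mirrors the usual transformers
# test layout):
#
#     pytest tests/models/bert_generation/test_modeling_bert_generation.py -k "past_large_inputs"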
311
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase : List[str] = { 'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'], 'tokenization_cpmant': ['CpmAntTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ 'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST', 'CpmAntForCausalLM', 'CpmAntModel', 'CpmAntPreTrainedModel', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
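# Effect of the `_LazyModule` indirection above: importing the package is cheap, and
# the torch-gated model symbols are only materialised on first attribute access.
from transformers.models.cpmant import CpmAntConfig  # resolves without pulling in the model code

config = CpmAntConfig()  # default hyper-parameters
# from transformers.models.cpmant import CpmAntModel  # this access triggers the torch-gated import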
358
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
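# Hedged end-to-end sketch of the pipeline above via the public `pipeline` factory
# (the checkpoint name is a real sentiment model, chosen for illustration):
from transformers import pipeline

classifier = pipeline('text-classification', model='distilbert-base-uncased-finetuned-sst-2-english')
print(classifier('I love this movie'))  # [{'label': 'POSITIVE', 'score': ...}]
print(classifier('I love this movie', top_k=None))  # scores for every label instead of the top one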
311
0
'''simple docstring'''
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    '''simple docstring'''
    # Initialise the PyTorch model from the JSON config
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--albert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained ALBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
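# Example invocation of the conversion script above (script filename and all paths
# are placeholders; the flag names come from the argparse definition):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base/pytorch_model.bin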
359
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCAmelCase_ ( snake_case__ = "laptop" ): '''simple docstring''' A : Tuple = F'https://www.amazon.in/laptop/s?k={product}' A : Optional[int] = { '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''', '''Accept-Language''': '''en-US, en;q=0.5''', } A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text ) # Initialize a Pandas dataframe with the column titles A : List[str] = DataFrame( columns=[ '''Product Title''', '''Product Link''', '''Current Price of the product''', '''Product Rating''', '''MRP of the product''', '''Discount''', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ): try: A : Optional[Any] = item.ha.text A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href'''] A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text try: A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text except AttributeError: A : Optional[int] = '''Not available''' try: A : str = ( '''₹''' + item.find( '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1] ) except AttributeError: A : List[Any] = '''''' try: A : Dict = float( ( ( float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) ) ) / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) ) * 100 ) except ValueError: A : str = float('''nan''' ) except AttributeError: pass A : Union[str, Any] = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A : List[str] = ''' ''' A : Optional[Any] = ''' ''' data_frame.index += 1 return data_frame if __name__ == "__main__": lowercase : Union[str, Any] = 'headphones' get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
311
0
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowercase : Optional[Any] = get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ): '''simple docstring''' os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A : str = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A : List[str] = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A : Any = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A : List[str] = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A : List[Any] = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A : int = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A : Optional[int] = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return A : str = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A : List[str] = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A : List[Any] = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A : Any = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A : int = os.path.join(snake_case__ , snake_case__ ) 
logger.info(F'Loading model from {input_model_file}' ) A : Optional[int] = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A : Optional[int] = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A : Optional[int] = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A : Union[str, Any] = state_dict['''model'''] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ): '''simple docstring''' os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A : Any = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A : Optional[Any] = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A : Dict = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A : Any = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A : Tuple = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A : Optional[Any] = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A : Optional[int] = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A : List[Any] = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A : Dict = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A : str = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A : Optional[int] = optim_state['''optimizer'''] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A : List[Any] = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) 
optimizer.load_state_dict(snake_case__ )
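# Hedged sketch of where the helpers above sit: Accelerate dispatches to them from
# `accelerator.save_state()` / `accelerator.load_state()` when an FSDP plugin is
# active, so user code normally only does the following (directory name illustrative):
#
#     accelerator = Accelerator()  # FSDP configured beforehand via `accelerate config`
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state('ckpt_dir')  # calls the model/optimizer savers above
#     accelerator.load_state('ckpt_dir')  # calls the corresponding loaders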
360
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    '''simple docstring'''
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
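# Example: render a small black-and-white variant and save it (filename illustrative).
small = get_image(image_width=200, image_height=150, use_distance_color_coding=False)
small.save('mandelbrot_bw.png')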
311
0
'''simple docstring'''
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()

# should not include what is already done by the `from_pt` argument
lowercase : str = {
    '/attention/': '/0/SelfAttention/',
    '/self_attention/': '/0/SelfAttention/',
    '/encoder_decoder_attention/': '/1/EncDecAttention/',
    'value': 'v',
    'query': 'q',
    'key': 'k',
    'out': 'o',
    'pre_self_attention_layer_norm': '0/layer_norm',
    'pre_cross_attention_layer_norm': '1/layer_norm',
    'pre_attention_layer_norm': '0/layer_norm',  # previously 1, but seems wrong
    'token_embedder': 'shared',
    'encoder_norm': 'final_layer_norm',
    'decoder_norm': 'final_layer_norm',
    'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
    'router/router_weights/w/': 'router/classifier/',
    'roer/roer_weights/w/': 'router/classifier/',
    'logits_dense': 'lm_head',
}


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Tuple = list(s_dict.keys() )
    for key in keys:
        A : List[str] = R'''.*/layers_(\d+)'''
        A : int = key
        if re.match(snake_case__ , snake_case__ ):
            A : List[str] = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , snake_case__ )

        A : Optional[Any] = R'''(encoder|decoder)\/'''
        if re.match(snake_case__ , snake_case__ ):
            A : Optional[int] = re.match(snake_case__ , snake_case__ ).groups()
            if groups[0] == "encoder":
                A : Tuple = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , snake_case__ )
                A : Optional[int] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , snake_case__ )
            elif groups[0] == "decoder":
                A : str = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , snake_case__ )
                A : List[str] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , snake_case__ )

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                A : List[str] = new_key.replace(snake_case__ , snake_case__ )

        print(F'{key} -> {new_key}' )
        A : Optional[Any] = s_dict.pop(snake_case__ )

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        A : Union[str, Any] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        A : str = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            A : Union[str, Any] = s_dict[key].shape[0]
            A : str = s_dict[key]
            for idx in range(snake_case__ ):
                A : Union[str, Any] = expert_weights[idx]
                print(F'{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}' )

            s_dict.pop(snake_case__ )

    return s_dict


lowercase : Dict = {
    'NUM_ENCODER_LAYERS': 'num_layers',
    'NUM_DECODER_LAYERS': 'num_decoder_layers',
    'NUM_HEADS': 'num_heads',
    'HEAD_DIM': 'd_kv',
    'EMBED_DIM': 'd_model',
    'MLP_DIM': 'd_ff',
    'NUM_SELECTED_EXPERTS': 'num_selected_experts',
    'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
    'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
    'dense.MlpBlock.activations': 'feed_forward_proj',
}


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    import regex as re

    with open(snake_case__ , '''r''' ) as f:
        A : Optional[Any] = f.read()

    A : Union[str, Any] = re.findall(R'''(.*) = ([0-9.]*)''' , snake_case__ )
    A : Dict = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            A : Any = float(snake_case__ ) if '''.''' in value else int(snake_case__ )

    A : str = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , snake_case__ )[0]
    A : Union[str, Any] = str(activation[1] )

    A : List[Any] = num_experts
    A : int = SwitchTransformersConfig(**snake_case__ )
    return config


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__="./" , snake_case__=8 ):
    '''simple docstring'''
    print(F'Loading flax weights from : {flax_checkpoint_path}' )
    A : Tuple = checkpoints.load_tax_checkpoint(snake_case__ )

    if gin_file is not None:
        A : str = convert_gin_to_config(snake_case__ , snake_case__ )
    else:
        A : Union[str, Any] = SwitchTransformersConfig.from_pretrained(snake_case__ )

    A : Dict = SwitchTransformersForConditionalGeneration(snake_case__ )

    A : Tuple = flax_params['''target''']
    A : Tuple = flatten_dict(snake_case__ , sep='''/''' )
    A : str = rename_keys(snake_case__ )
    A : Dict = unflatten_dict(snake_case__ , sep='''/''' )

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )

    print(F'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(snake_case__ )


if __name__ == "__main__":
    lowercase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
            ' model architecture. If not provided, a `gin_file` has to be provided.'
        ),
    )
    parser.add_argument(
        '--gin_file',
        default=None,
        type=str,
        required=False,
        help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
    )
    parser.add_argument(
        '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
    )
    parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    lowercase : Any = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_tax_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
361
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
lowercase : Optional[int] = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowercase : str = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    lowercase : Optional[Any] = parser.parse_args()
    if args.check_lib:
        lowercase : List[Any] = importlib.import_module('transformers')
        lowercase : str = Path(transformers_module.__file__).parent
    else:
        lowercase : List[Any] = Path.cwd() / 'build/lib/transformers'

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
311
0
'''simple docstring'''


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Tuple = [1]

    A, A, A : Dict = 0, 0, 0
    A : Tuple = ugly_nums[ia] * 2
    A : List[str] = ugly_nums[ia] * 3
    A : Optional[int] = ugly_nums[ia] * 5

    for _ in range(1 , snake_case__ ):
        A : Any = min(snake_case__ , snake_case__ , snake_case__ )
        ugly_nums.append(snake_case__ )
        if next_num == next_a:
            ia += 1
            A : Tuple = ugly_nums[ia] * 2
        if next_num == next_a:
            ia += 1
            A : Dict = ugly_nums[ia] * 3
        if next_num == next_a:
            ia += 1
            A : Tuple = ugly_nums[ia] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f'''{ugly_numbers(2_00) = }''')
362
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Tuple = image_size A : int = patch_size A : Optional[int] = num_channels A : str = is_training A : List[Any] = use_labels A : Any = hidden_size A : Any = num_hidden_layers A : Optional[int] = num_attention_heads A : Any = intermediate_size A : List[str] = hidden_act A : str = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Optional[int] = initializer_range A : Dict = scope A : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[Any] = (image_size // patch_size) ** 2 A : Tuple = num_patches + 2 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Tuple = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Tuple = 
TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) A : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : Optional[int] = 1 A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE ) A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : str = self.type_sequence_label_size A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Optional[Any] = 1 A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A : Tuple = config_and_inputs A : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = TFDeiTModelTester(self ) A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) A : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) A : Dict = self.default_image_processor A : List[str] = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring'''


def lowerCAmelCase_ ( snake_case__ , snake_case__ = False ):
    '''simple docstring'''
    if not isinstance(snake_case__ , snake_case__ ):
        A : List[Any] = F'Expected string as input, found {type(snake_case__ )}'
        raise ValueError(snake_case__ )
    if not isinstance(snake_case__ , snake_case__ ):
        A : List[Any] = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}'
        raise ValueError(snake_case__ )

    A : List[Any] = input_str.split('''_''' )

    A : Optional[Any] = 0 if use_pascal else 1

    A : List[str] = words[start_index:]

    A : int = [word[0].upper() + word[1:] for word in words_to_capitalize]

    A : Any = '''''' if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
363
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

lowercase : List[str] = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Optional[Any] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
311
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

lowercase : str = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Dict = ['FNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Tuple = ['FNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Optional[Any] = [
        'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FNetForMaskedLM',
        'FNetForMultipleChoice',
        'FNetForNextSentencePrediction',
        'FNetForPreTraining',
        'FNetForQuestionAnswering',
        'FNetForSequenceClassification',
        'FNetForTokenClassification',
        'FNetLayer',
        'FNetModel',
        'FNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
364
'''simple docstring'''
from __future__ import annotations

lowercase : Union[str, Any] = list[tuple[int, int]]

lowercase : Optional[Any] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
        """simple docstring"""
        A : int = pos_x
        A : Optional[Any] = pos_y
        A : Optional[Any] = (pos_y, pos_x)
        A : str = goal_x
        A : Optional[int] = goal_y
        A : List[Any] = g_cost
        A : str = parent
        A : str = self.calculate_heuristic()

    def __lowerCAmelCase ( self ) -> float:
        """simple docstring"""
        A : Optional[int] = abs(self.pos_x - self.goal_x )
        A : Optional[Any] = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
        A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )

        A : Optional[Any] = [self.start]
        A : list[Node] = []

        A : Tuple = False

    def __lowerCAmelCase ( self ) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            A : Optional[int] = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                A : Optional[int] = True
                return self.retrace_path(SCREAMING_SNAKE_CASE )

            self.closed_nodes.append(SCREAMING_SNAKE_CASE )
            A : Any = self.get_successors(SCREAMING_SNAKE_CASE )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(SCREAMING_SNAKE_CASE )
                else:
                    # retrieve the best current path
                    A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )
                    else:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )

        if not self.reached:
            return [self.start.pos]
        return None

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
        """simple docstring"""
        A : List[Any] = []
        for action in delta:
            A : List[str] = parent.pos_x + action[1]
            A : Dict = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    SCREAMING_SNAKE_CASE ,
                    SCREAMING_SNAKE_CASE ,
                    self.target.pos_y ,
                    self.target.pos_x ,
                    parent.g_cost + 1 ,
                    SCREAMING_SNAKE_CASE ,
                ) )
        return successors

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path:
        """simple docstring"""
        A : int = node
        A : Union[str, Any] = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            A : int = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    lowercase : Tuple = (0, 0)
    lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    lowercase : int = GreedyBestFirst(init, goal)
    lowercase : Union[str, Any] = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            lowercase : Dict = 2

    for elem in grid:
        print(elem)
311
0
'''simple docstring'''
from itertools import zip_longest

import requests
from bsa import BeautifulSoup
from pandas import DataFrame


def lowerCAmelCase_ ( snake_case__ = "laptop" ):
    '''simple docstring'''
    A : Tuple = F'https://www.amazon.in/laptop/s?k={product}'
    A : Optional[int] = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text )
    # Initialize a Pandas dataframe with the column titles
    A : List[str] = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) ,
        soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) ,
    ):
        try:
            A : Optional[Any] = item.ha.text
            A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
            A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                A : Optional[int] = '''Not available'''
            try:
                A : str = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                A : List[Any] = ''''''
            try:
                A : Dict = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                A : str = float('''nan''' )
        except AttributeError:
            pass
        A : Union[str, Any] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        A : List[str] = ''' '''
        A : Optional[Any] = ''' '''
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    lowercase : Union[str, Any] = 'headphones'
    get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
365
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowercase : Any = 'src/transformers' lowercase : str = 'docs/source/en/tasks' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Union[str, Any] = f.readlines() # Find the start prompt. A : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 A : List[str] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowercase : int = direct_transformers_import(TRANSFORMERS_PATH) lowercase : str = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowercase : Optional[int] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = TASK_GUIDE_TO_MODELS[task_guide] A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) A : Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n" def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A, A, A : Optional[int] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) A : Optional[int] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`' ''' to fix this.''' ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : List[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
311
0
'''simple docstring''' import os import sys import unittest lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : int = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[str] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Optional[Any] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = {'''BertModelTest''': '''BertModelTester'''} A : List[Any] = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Dict = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Any = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Optional[Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Optional[Any] = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : Any = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], 
'''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
366
'''simple docstring'''


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if len(snake_case__ ) <= 1:
        return [tuple(snake_case__ )]

    A : Tuple = []

    def generate(snake_case__ , snake_case__ ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return

        generate(k - 1 , snake_case__ )

        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                A, A : Optional[Any] = arr[k - 1], arr[i]
            else:  # k is odd
                A, A : Optional[Any] = arr[k - 1], arr[0]
            generate(k - 1 , snake_case__ )

    generate(len(snake_case__ ) , snake_case__ )
    return res


if __name__ == "__main__":
    lowercase : List[str] = input('Enter numbers separated by a comma:\n').strip()
    lowercase : int = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
311
0
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : List[Any] = logging.get_logger(__name__) lowercase : Optional[Any] = { 'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class A ( __snake_case ): __magic_name__ = '''data2vec-audio''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=19 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="sum" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1500) , SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_activation A : str = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[Any] = list(SCREAMING_SNAKE_CASE ) A : str = conv_bias A : Any = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : Union[str, Any] = conv_pos_kernel_size A : Optional[int] = len(self.conv_dim ) A : str = num_hidden_layers A : int = intermediate_size A : Optional[int] = hidden_act A : Dict = num_attention_heads A : str = hidden_dropout A : List[Any] = attention_dropout A : int = activation_dropout A : Any = feat_proj_dropout A : Optional[int] = final_dropout A : List[str] = layerdrop A : int = layer_norm_eps A : int = initializer_range A : int = vocab_size A : List[Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : List[Any] = mask_time_prob A : Tuple = mask_time_length A : Dict = mask_time_min_masks A : List[str] = mask_feature_prob A : Tuple = mask_feature_length A : Optional[int] = mask_feature_min_masks # ctc loss A : Tuple = ctc_loss_reduction A : int = ctc_zero_infinity # adapter A : Optional[Any] = add_adapter A : List[Any] = adapter_kernel_size A : Optional[Any] = adapter_stride A : List[str] = num_adapter_layers A : Optional[Any] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A : Dict = list(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = list(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = list(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = xvector_output_dim @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return math.prod(self.conv_stride )
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) lowercase : Dict = getLogger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = 8 , snake_case__ = 1024 , snake_case__="val" , snake_case__=None , snake_case__=False , snake_case__="summarization" , snake_case__=None , snake_case__=1 , snake_case__ = None , snake_case__="" , **snake_case__ , ): '''simple docstring''' A : str = str(snake_case__ ) assert local_rank is not None torch.distributed.init_process_group(backend='''nccl''' , rank=snake_case__ ) A : Tuple = Path(snake_case__ ) A : str = save_dir.joinpath(F'rank_{local_rank}_output.json' ) torch.cuda.set_device(snake_case__ ) A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(snake_case__ ).cuda() if fpaa: A : Tuple = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case__ , snake_case__ ) # update config with task specific params A : Optional[int] = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: A : int = num_return_sequences A : int = AutoTokenizer.from_pretrained(snake_case__ ) logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type. if max_source_length is None: A : Union[str, Any] = tokenizer.model_max_length if prefix is None: A : str = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' A : Tuple = SeqaSeqDataset( snake_case__ , snake_case__ , snake_case__ , max_target_length=1024 , type_path=snake_case__ , n_obs=snake_case__ , prefix=snake_case__ , **snake_case__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
A : str = ds.make_sortish_sampler(snake_case__ , distributed=snake_case__ , add_extra_examples=snake_case__ , shuffle=snake_case__ ) A : str = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=snake_case__ , collate_fn=ds.collate_fn ) A : Any = [] for batch in tqdm(snake_case__ ): A : Optional[int] = model.generate( input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=snake_case__ , num_beams=snake_case__ , **snake_case__ , ) A : Optional[int] = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) A : List[str] = batch['''ids'''] if num_return_sequences > 1: A : Optional[int] = chunks(snake_case__ , snake_case__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case__ ): results.append({'''pred''': pred, '''id''': ids[i].item()} ) save_json(snake_case__ , snake_case__ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): '''simple docstring''' A : Dict = argparse.ArgumentParser( epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' ) parser.add_argument('''--data_dir''' , type=snake_case__ , help='''like cnn_dm/test.source''' ) parser.add_argument( '''--model_name''' , type=snake_case__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , ) parser.add_argument('''--save_dir''' , type=snake_case__ , help='''where to save''' , default='''tmp_gen''' ) parser.add_argument('''--max_source_length''' , type=snake_case__ , default=snake_case__ ) parser.add_argument( '''--type_path''' , type=snake_case__ , default='''test''' , help='''which subset to evaluate typically train/val/test''' ) parser.add_argument('''--task''' , type=snake_case__ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=snake_case__ , default=8 , required=snake_case__ , help='''batch size''' ) parser.add_argument( '''--local_rank''' , type=snake_case__ , default=-1 , required=snake_case__ , help='''should be passed by distributed.launch''' ) parser.add_argument( '''--n_obs''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''How many observations. Defaults to all.''' ) parser.add_argument( '''--num_return_sequences''' , type=snake_case__ , default=1 , required=snake_case__ , help='''How many sequences to return''' ) parser.add_argument( '''--sync_timeout''' , type=snake_case__ , default=600 , required=snake_case__ , help='''How long should master process wait for other processes to finish.''' , ) parser.add_argument('''--src_lang''' , type=snake_case__ , default=snake_case__ , required=snake_case__ ) parser.add_argument('''--tgt_lang''' , type=snake_case__ , default=snake_case__ , required=snake_case__ ) parser.add_argument( '''--prefix''' , type=snake_case__ , required=snake_case__ , default=snake_case__ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--debug''' , action='''store_true''' ) A : str = time.time() A : str = parser.parse_known_args() A : Dict = parse_numeric_n_bool_cl_kwargs(snake_case__ ) if generate_kwargs and args.local_rank <= 0: print(F'parsed the following generate kwargs: {generate_kwargs}' ) A : List[str] = Path(args.save_dir + '''_tmp''' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) # this handles locking. 
A : str = list(json_save_dir.glob('''rank_*.json''' ) ) if intermediate_files: raise ValueError(F'Found files at {json_save_dir} please move or remove them.' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. A : Tuple = {} if args.src_lang is not None: A : List[Any] = args.src_lang if args.tgt_lang is not None: A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case__ ) A : Any = eval_data_dir( args.data_dir , snake_case__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=snake_case__ , **snake_case__ , ) if args.local_rank <= 0: A : Any = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case__ ) A : List[str] = gather_results_from_each_node(snake_case__ , snake_case__ , args.sync_timeout ) A : List[Any] = combine_partial_results(snake_case__ ) if args.num_return_sequences > 1: A : int = save_dir.joinpath('''pseudolabel_results.json''' ) print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' ) save_json(snake_case__ , snake_case__ ) return A : Tuple = Path(args.data_dir ).joinpath(args.type_path + '''.target''' ) with open(snake_case__ ) as f: A : List[str] = [x.rstrip() for x in f.readlines()][: len(snake_case__ )] # Calculate metrics, save metrics, and save _generations.txt A : Optional[Any] = '''translation''' in args.task A : List[Any] = calculate_bleu if calc_bleu else calculate_rouge A : List[str] = '''bleu''' if calc_bleu else '''rouge''' A : Dict = score_fn(snake_case__ , snake_case__ ) A : Optional[Any] = len(snake_case__ ) A : Tuple = time.time() - start_time A : Union[str, Any] = round(runtime / metrics['''n_obs'''] , 4 ) A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics A : Any = save_dir.joinpath(F'{args.type_path}_{metric_name}.json' ) save_json(snake_case__ , snake_case__ , indent=snake_case__ ) print(snake_case__ ) write_txt_file(snake_case__ , save_dir.joinpath(F'{args.type_path}_generations.txt' ) ) if args.debug: write_txt_file(snake_case__ , save_dir.joinpath(F'{args.type_path}.target' ) ) else: shutil.rmtree(snake_case__ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = [] for partial_result in partial_results: records.extend(snake_case__ ) A : Optional[int] = sorted(snake_case__ , key=lambda snake_case__ : x["id"] ) A : int = [x['''pred'''] for x in records] return preds def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[int] = time.time() logger.info('''waiting for all nodes to finish''' ) A : List[Any] = None while (time.time() - start_wait) < timeout: A : Optional[Any] = list(save_dir.glob('''rank_*.json''' ) ) if len(snake_case__ ) < num_replicas: continue try: # make sure all json files are fully saved A : Union[str, Any] = lmap(snake_case__ , snake_case__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError('''Rank 0 gave up on waiting for other processes''' ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
368
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class A(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.'
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
311
0
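As a rough usage sketch for the DDIM pipeline row above (the checkpoint id is only an illustrative placeholder, and the class name A follows that sample rather than any published API):

import torch
from diffusers import UNet2DModel, DDIMScheduler

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")  # example unconditional UNet checkpoint
scheduler = DDIMScheduler(num_train_timesteps=1000)
pipe = A(unet=unet, scheduler=scheduler)
images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images  # list of PIL images
images[0].save("ddim_sample.png")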
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''audio_values''', '''audio_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=[16, 16] , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=44100 , SCREAMING_SNAKE_CASE=86 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=0.0 , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__( feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = spectrogram_length A : str = num_channels A : Optional[Any] = patch_size A : int = feature_size // self.patch_size[1] A : int = n_fft A : Optional[int] = sampling_rate // hop_length_to_sampling_rate A : int = sampling_rate A : Optional[int] = padding_value A : List[Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> np.ndarray: """simple docstring""" A : Union[str, Any] = spectrogram( SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) A : Tuple = log_spec[:, :-1] A : Any = log_spec - 20.0 A : Dict = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' F' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : Any = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Optional[Any] = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : List[str] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : int = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis A : str = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , SCREAMING_SNAKE_CASE ): A : Any = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask A : List[Any] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: A : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] A : List[Any] = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding A : Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch A : str = np.ones([len(SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) A : str = padded_audio_features * self.padding_value for i in range(len(SCREAMING_SNAKE_CASE ) ): A : List[Any] = audio_features[i] A : Tuple = feature # return as BatchFeature if return_attention_mask: A : Optional[int] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: A : Union[str, Any] = {'''audio_values''': padded_audio_features} A : int = BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE ) return encoded_inputs
369
'''simple docstring'''
from __future__ import annotations

from random import random


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
311
0
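A brief usage sketch for the treap row above (it assumes the fixed split/merge helpers from that sample are in scope; duplicate keys are allowed, and erase removes every node holding the value):

root = None
for v in [5, 3, 9, 3]:
    root = insert(root, v)  # build the treap; the duplicate 3 is kept
root = erase(root, 3)       # drops all nodes with value 3
inorder(root)               # prints: 5,9,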
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class A ( __snake_case ): # to overwrite at feature extractactor specific tests __magic_name__ = None __magic_name__ = None @property def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''feature_size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''sampling_rate''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''padding_value''' ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common() A : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) A : Any = feat_extract.model_input_names[0] A : Tuple = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) ) A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE ) A : int = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) A : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: A : Any = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE ) A : int = self.feature_extraction_class(**self.feat_extract_dict ) A : Tuple = feat_extract.model_input_names[0] A : Any = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) A : Dict = processed_features[input_name] if len(batch_features_input.shape ) < 3: A : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE ) A : str = self.feature_extraction_class(**self.feat_extract_dict ) A : Union[str, Any] = feat_extract.model_input_names[0] A : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) A : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: A : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=False ) -> Optional[int]: """simple docstring""" def _inputs_have_equal_length(SCREAMING_SNAKE_CASE ): A : Optional[Any] = len(input[0] ) for input_slice in input[1:]: if len(SCREAMING_SNAKE_CASE ) != length: return False return True def _inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): return False for input_slice_a, input_slice_a in 
zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE ) , np.asarray(SCREAMING_SNAKE_CASE ) , atol=1e-3 ): return False return True A : int = self.feature_extraction_class(**self.feat_extract_dict ) A : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE ) A : Any = feat_extract.model_input_names[0] A : List[Any] = BatchFeature({input_name: speech_inputs} ) A : Tuple = self.feat_extract_tester.seq_length_diff A : Any = self.feat_extract_tester.max_seq_length + pad_diff A : List[Any] = self.feat_extract_tester.min_seq_length A : Any = self.feat_extract_tester.batch_size A : Union[str, Any] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy A : Tuple = feat_extract.pad(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = input_a[input_name] A : Union[str, Any] = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' ) A : Dict = input_a[input_name] A : Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) A : Optional[Any] = input_a[input_name] A : List[str] = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''np''' ) A : List[str] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(SCREAMING_SNAKE_CASE ): feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' )[input_name] A : int = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : Any = input_a[input_name] self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy A : str = feat_extract.pad(SCREAMING_SNAKE_CASE , pad_to_multiple_of=10 ) A : Tuple = input_a[input_name] A : str = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , pad_to_multiple_of=10 ) A : Dict = input_a[input_name] A : Union[str, Any] = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , pad_to_multiple_of=10 , max_length=SCREAMING_SNAKE_CASE ) A : List[str] = input_a[input_name] A : Optional[int] = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , pad_to_multiple_of=10 , max_length=SCREAMING_SNAKE_CASE , return_tensors='''np''' , ) A : int = input_a[input_name] self.assertTrue(all(len(SCREAMING_SNAKE_CASE ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : Optional[int] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(SCREAMING_SNAKE_CASE ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct A : Any = 
(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: """simple docstring""" def _inputs_have_equal_length(SCREAMING_SNAKE_CASE ): A : str = len(input[0] ) for input_slice in input[1:]: if len(SCREAMING_SNAKE_CASE ) != length: return False return True def _inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): return False for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE ) , np.asarray(SCREAMING_SNAKE_CASE ) , atol=1e-3 ): return False return True A : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE ) A : Tuple = feat_extract.model_input_names[0] A : int = BatchFeature({input_name: speech_inputs} ) # truncate to smallest A : str = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=SCREAMING_SNAKE_CASE ) A : int = input_a[input_name] A : str = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) A : List[Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) # truncate to smallest with np A : int = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=SCREAMING_SNAKE_CASE , ) A : Any = input_a[input_name] A : Any = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) A : Tuple = input_a[input_name] self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) # truncate to middle A : Optional[int] = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=SCREAMING_SNAKE_CASE , return_tensors='''np''' , ) A : Any = input_a[input_name] A : Tuple = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=SCREAMING_SNAKE_CASE ) A : int = input_a[input_name] A : List[str] = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) A : List[Any] = input_a[input_name] self.assertTrue(input_a.shape[1] 
== len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(SCREAMING_SNAKE_CASE ): feat_extract.pad(SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(SCREAMING_SNAKE_CASE ): feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , truncation=SCREAMING_SNAKE_CASE )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(SCREAMING_SNAKE_CASE ): feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , truncation=SCREAMING_SNAKE_CASE )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(SCREAMING_SNAKE_CASE ): feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy A : str = 12 A : List[str] = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , ) A : List[Any] = input_a[input_name] A : str = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=SCREAMING_SNAKE_CASE , ) A : Dict = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of A : Tuple = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: A : int = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self._check_padding(numpify=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self._check_padding(numpify=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self._check_truncation(numpify=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self._check_truncation(numpify=SCREAMING_SNAKE_CASE ) @require_torch def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = self.feature_extraction_class(**self.feat_extract_dict ) A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common() A : Tuple = feat_extract.model_input_names[0] A : int = BatchFeature({input_name: speech_inputs} ) A : Union[str, Any] = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''np''' )[input_name] A : List[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = 
self.feature_extraction_class(**self.feat_extract_dict ) A : Tuple = self.feat_extract_tester.prepare_inputs_for_common() A : List[Any] = feat_extract.model_input_names[0] A : str = BatchFeature({input_name: speech_inputs} ) A : Dict = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''np''' )[input_name] A : Optional[int] = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''tf''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Any = self.feat_extract_dict A : Tuple = True A : List[str] = self.feature_extraction_class(**SCREAMING_SNAKE_CASE ) A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common() A : Any = [len(SCREAMING_SNAKE_CASE ) for x in speech_inputs] A : Dict = feat_extract.model_input_names[0] A : Dict = BatchFeature({input_name: speech_inputs} ) A : Dict = feat_extract.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = self.feat_extract_dict A : Optional[int] = True A : List[Any] = self.feature_extraction_class(**SCREAMING_SNAKE_CASE ) A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common() A : Tuple = [len(SCREAMING_SNAKE_CASE ) for x in speech_inputs] A : Union[str, Any] = feat_extract.model_input_names[0] A : int = BatchFeature({input_name: speech_inputs} ) A : List[str] = min(SCREAMING_SNAKE_CASE ) A : List[str] = feat_extract.pad( SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
311
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase : int = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[int] = [ 'MRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MraForMaskedLM', 'MraForMultipleChoice', 'MraForQuestionAnswering', 'MraForSequenceClassification', 'MraForTokenClassification', 'MraLayer', 'MraModel', 'MraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
371
'''simple docstring'''
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.')
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument('--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.')
    parser.add_argument('--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.')
    parser.add_argument('--scheduler_type', default='pndm', type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']")
    parser.add_argument('--pipeline_type', default=None, type=str, help="The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'. If `None` pipeline will be automatically inferred.")
    parser.add_argument('--image_size', default=None, type=int, help='The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2 Base. Use 768 for Stable Diffusion v2.')
    parser.add_argument('--prediction_type', default=None, type=str, help="The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.")
    parser.add_argument('--extract_ema', action='store_true', help='Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.')
    parser.add_argument('--upcast_attention', action='store_true', help='Whether the attention computation should always be upcasted. This is necessary when running stable diffusion 2.1.')
    parser.add_argument('--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.')
    parser.add_argument('--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    parser.add_argument('--stable_unclip', type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.")
    parser.add_argument('--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.')
    parser.add_argument('--clip_stats_path', type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False)
    parser.add_argument('--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument('--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.')
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
311
0
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase ) -> int: assert column_title.isupper() snake_case_ = 0 snake_case_ = len(UpperCAmelCase ) - 1 snake_case_ = 0 while index >= 0: snake_case_ = (ord(column_title[index] ) - 64) * pow(26 , UpperCAmelCase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
312
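A few spot checks for the column-title conversion above (expected values follow from base-26 positional arithmetic; the function name UpperCAmelCase is the sample's own):

assert UpperCAmelCase("A") == 1
assert UpperCAmelCase("Z") == 26
assert UpperCAmelCase("AB") == 28    # 1 * 26 + 2
assert UpperCAmelCase("ZY") == 701   # 26 * 26 + 25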
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
312
1
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: # Initialise PyTorch model snake_case_ = LxmertConfig.from_json_file(UpperCAmelCase ) print(f'Building PyTorch model from configuration: {config}' ) snake_case_ = LxmertForPreTraining(UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __UpperCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
312
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
1
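A quick sanity check for the arc-length helper above, assuming the fixed function is in scope: 90 degrees should give exactly a quarter of the circumference.

from math import pi

assert abs(arc_length(90, 10) - 2 * pi * 10 / 4) < 1e-9  # quarter circle of radius 10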
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["input_features"] def __init__( self, lowerCAmelCase__=80, lowerCAmelCase__=1_6000, lowerCAmelCase__=160, lowerCAmelCase__=30, lowerCAmelCase__=400, lowerCAmelCase__=0.0, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Dict: super().__init__( feature_size=lowerCAmelCase__, sampling_rate=lowerCAmelCase__, padding_value=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = n_fft snake_case_ = hop_length snake_case_ = chunk_length snake_case_ = chunk_length * sampling_rate snake_case_ = self.n_samples // hop_length snake_case_ = sampling_rate snake_case_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=lowerCAmelCase__, min_frequency=0.0, max_frequency=8000.0, sampling_rate=lowerCAmelCase__, norm='slaney', mel_scale='slaney', ) def a_ ( self, lowerCAmelCase__) -> np.ndarray: snake_case_ = spectrogram( lowerCAmelCase__, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel='log10', ) snake_case_ = log_spec[:, :-1] snake_case_ = np.maximum(lowerCAmelCase__, log_spec.max() - 8.0) snake_case_ = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = 0.0) -> List[np.ndarray]: if attention_mask is not None: snake_case_ = np.array(lowerCAmelCase__, np.intaa) snake_case_ = [] for vector, length in zip(lowerCAmelCase__, attention_mask.sum(-1)): snake_case_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: snake_case_ = padding_value normed_input_values.append(lowerCAmelCase__) else: snake_case_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def __call__( self, lowerCAmelCase__, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = "max_length", lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. 
' 'Failing to do so can result in silent errors that might be hard to debug.') snake_case_ = isinstance(lowerCAmelCase__, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') snake_case_ = is_batched_numpy or ( isinstance(lowerCAmelCase__, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: snake_case_ = [np.asarray([speech], dtype=np.floataa).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase__, np.ndarray): snake_case_ = np.asarray(lowerCAmelCase__, dtype=np.floataa) elif isinstance(lowerCAmelCase__, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): snake_case_ = raw_speech.astype(np.floataa) # always return batch if not is_batched: snake_case_ = [np.asarray([raw_speech]).T] snake_case_ = BatchFeature({'input_features': raw_speech}) # convert into correct format for padding snake_case_ = self.pad( lowerCAmelCase__, padding=lowerCAmelCase__, max_length=max_length if max_length else self.n_samples, truncation=lowerCAmelCase__, pad_to_multiple_of=lowerCAmelCase__, return_attention_mask=return_attention_mask or do_normalize, ) # zero-mean and unit-variance normalization if do_normalize: snake_case_ = self.zero_mean_unit_var_norm( padded_inputs['input_features'], attention_mask=padded_inputs['attention_mask'], padding_value=self.padding_value, ) snake_case_ = np.stack(padded_inputs['input_features'], axis=0) # make sure list is in array format snake_case_ = padded_inputs.get('input_features').transpose(2, 0, 1) snake_case_ = [self._np_extract_fbank_features(lowerCAmelCase__) for waveform in input_features[0]] if isinstance(input_features[0], lowerCAmelCase__): snake_case_ = [np.asarray(lowerCAmelCase__, dtype=np.floataa) for feature in input_features] else: snake_case_ = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) snake_case_ = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: snake_case_ = padded_inputs.convert_to_tensors(lowerCAmelCase__) return padded_inputs def a_ ( self) -> Dict[str, Any]: snake_case_ = copy.deepcopy(self.__dict__) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
312
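The extractor two rows above mirrors transformers' WhisperFeatureExtractor; as a minimal sketch, here is how the upstream class would be driven with synthetic audio (one second of a 440 Hz sine at 16 kHz):

import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor()  # defaults: 80 mel bins, 16 kHz sampling, 30 s chunks
audio = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
feats = fe(audio, sampling_rate=16_000, return_tensors="np")
print(feats["input_features"].shape)  # (1, 80, 3000) with the defaults above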
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
1
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=7, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=99, lowerCAmelCase__=24, lowerCAmelCase__=2, lowerCAmelCase__=6, lowerCAmelCase__=37, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=16, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=3, lowerCAmelCase__=None, lowerCAmelCase__=1000, ) -> List[Any]: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = scope snake_case_ = range_bbox def a_ ( self) -> Tuple: snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case_ = bbox[i, j, 3] snake_case_ = bbox[i, j, 1] snake_case_ = t if bbox[i, j, 2] < bbox[i, j, 0]: snake_case_ = bbox[i, j, 2] snake_case_ = bbox[i, j, 0] snake_case_ = t snake_case_ = None if self.use_input_mask: snake_case_ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size) snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.num_labels) snake_case_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def a_ ( self) -> Optional[Any]: return LiltConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> 
List[str]: snake_case_ = LiltModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__, bbox=lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, bbox=lowerCAmelCase__, token_type_ids=lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, bbox=lowerCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> List[Any]: snake_case_ = self.num_labels snake_case_ = LiltForTokenClassification(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model( lowerCAmelCase__, bbox=lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__, labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Optional[int]: snake_case_ = LiltForQuestionAnswering(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model( lowerCAmelCase__, bbox=lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__, start_positions=lowerCAmelCase__, end_positions=lowerCAmelCase__, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def a_ ( self) -> Dict: snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str: return True def a_ ( self) -> str: snake_case_ = LiltModelTester(self) snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, hidden_size=37) def a_ ( self) -> int: self.config_tester.run_common_tests() def a_ ( self) -> int: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__) def a_ ( self) -> Optional[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__) @slow def a_ ( self) -> Any: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = LiltModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) @require_torch @slow class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: snake_case_ = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(lowerCAmelCase__) snake_case_ = torch.tensor([[1, 2]], device=lowerCAmelCase__) snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=lowerCAmelCase__) # forward pass with torch.no_grad(): snake_case_ = model(input_ids=lowerCAmelCase__, bbox=lowerCAmelCase__) snake_case_ = torch.Size([1, 2, 768]) snake_case_ = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=lowerCAmelCase__, ) self.assertTrue(outputs.last_hidden_state.shape, lowerCAmelCase__) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], lowerCAmelCase__, atol=1e-3))
312
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
312
1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase ) -> bool: snake_case_ = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
312
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
1
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
312
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
312
1
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) __UpperCamelCase = logging.getLogger() def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: snake_case_ = {} snake_case_ = os.path.join(UpperCAmelCase , 'all_results.json' ) if os.path.exists(UpperCAmelCase ): with open(UpperCAmelCase , 'r' ) as f: snake_case_ = json.load(UpperCAmelCase ) else: raise ValueError(f'can\'t find {path}' ) return results __UpperCamelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class UpperCamelCase ( lowerCAmelCase__ ): def a_ ( self) -> List[str]: import xla_spawn snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__): snake_case_ = time() xla_spawn.main() snake_case_ = time() snake_case_ = get_results(lowerCAmelCase__) self.assertGreaterEqual(result['eval_accuracy'], 0.75) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start, 500) def a_ ( self) -> Dict: import xla_spawn snake_case_ = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__): xla_spawn.main()
312
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "vit" def __init__( self, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=224, lowerCAmelCase__=16, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=16, **lowerCAmelCase__, ) -> List[str]: super().__init__(**lowerCAmelCase__) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias snake_case_ = encoder_stride class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-4
312
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
312
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None: warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.', lowerCAmelCase__, ) super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
312
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
312
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = None def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<unk>", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<pad>", lowerCAmelCase__=False, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Any: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, unk_token=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' ' pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' ' pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> List[int]: snake_case_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) 
+ [self.eos_token_id]) if len(lowerCAmelCase__) > self.model_max_length: snake_case_ = input_ids[-self.model_max_length :] return input_ids
312
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ = frozenset([] ) def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, ) snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) torch.manual_seed(0) snake_case_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0) snake_case_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) snake_case_ = CLIPTextModel(lowerCAmelCase__) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') snake_case_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) snake_case_ = image.cpu().permute(0, 2, 3, 1)[0] snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64)) snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64)) if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a_ ( self) -> Dict: snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = 
StableDiffusionInpaintPipeline(**lowerCAmelCase__) snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = sd_pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Union[str, Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def a_ ( self) -> Optional[int]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def a_ ( self) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = 
'stabilityai/stable-diffusion-2-inpainting' snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler') snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
312
1
"""simple docstring""" import sys import turtle def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> None: my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(UpperCAmelCase , get_mid(UpperCAmelCase , UpperCAmelCase ) , get_mid(UpperCAmelCase , UpperCAmelCase ) , depth - 1 ) triangle(UpperCAmelCase , get_mid(UpperCAmelCase , UpperCAmelCase ) , get_mid(UpperCAmelCase , UpperCAmelCase ) , depth - 1 ) triangle(UpperCAmelCase , get_mid(UpperCAmelCase , UpperCAmelCase ) , get_mid(UpperCAmelCase , UpperCAmelCase ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) __UpperCamelCase = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') __UpperCamelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
312
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
312
1
"""simple docstring""" from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = CustomTokenizer pass
312
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
1
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list: if len(UpperCAmelCase ) != 2 or len(a[0] ) != 2 or len(UpperCAmelCase ) != 2 or len(b[0] ) != 2: raise Exception('Matrices are not 2x2' ) snake_case_ = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Tuple: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCAmelCase ) ) ] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCAmelCase ) ) ] def UpperCAmelCase ( UpperCAmelCase ) -> tuple[list, list, list, list]: if len(UpperCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('Odd matrices are not supported!' ) snake_case_ = len(UpperCAmelCase ) snake_case_ = matrix_length // 2 snake_case_ = [[a[i][j] for j in range(UpperCAmelCase , UpperCAmelCase )] for i in range(UpperCAmelCase )] snake_case_ = [ [a[i][j] for j in range(UpperCAmelCase , UpperCAmelCase )] for i in range(UpperCAmelCase , UpperCAmelCase ) ] snake_case_ = [[a[i][j] for j in range(UpperCAmelCase )] for i in range(UpperCAmelCase )] snake_case_ = [[a[i][j] for j in range(UpperCAmelCase )] for i in range(UpperCAmelCase , UpperCAmelCase )] return top_left, top_right, bot_left, bot_right def UpperCAmelCase ( UpperCAmelCase ) -> tuple[int, int]: return len(UpperCAmelCase ), len(matrix[0] ) def UpperCAmelCase ( UpperCAmelCase ) -> None: print('\n'.join(str(UpperCAmelCase ) for line in matrix ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list: if matrix_dimensions(UpperCAmelCase ) == (2, 2): return default_matrix_multiplication(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = split_matrix(UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = split_matrix(UpperCAmelCase ) snake_case_ = actual_strassen(UpperCAmelCase , matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = actual_strassen(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) snake_case_ = actual_strassen(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) snake_case_ = actual_strassen(UpperCAmelCase , matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = actual_strassen(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , matrix_addition(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = actual_strassen(matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) , matrix_addition(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = actual_strassen(matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) , matrix_addition(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = matrix_addition(matrix_subtraction(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) , UpperCAmelCase ) snake_case_ = matrix_addition(UpperCAmelCase , UpperCAmelCase ) snake_case_ = matrix_addition(UpperCAmelCase , UpperCAmelCase ) snake_case_ = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) , UpperCAmelCase ) # construct the new matrix from our 4 quadrants snake_case_ = [] for i in range(len(UpperCAmelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in 
range(len(UpperCAmelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list: if matrix_dimensions(UpperCAmelCase )[1] != matrix_dimensions(UpperCAmelCase )[0]: snake_case_ = ( 'Unable to multiply these matrices, please check the dimensions.\n' f'Matrix A: {matrixa}\n' f'Matrix B: {matrixa}' ) raise Exception(UpperCAmelCase ) snake_case_ = matrix_dimensions(UpperCAmelCase ) snake_case_ = matrix_dimensions(UpperCAmelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] snake_case_ = max(*UpperCAmelCase , *UpperCAmelCase ) snake_case_ = int(math.pow(2 , math.ceil(math.loga(UpperCAmelCase ) ) ) ) snake_case_ = matrixa snake_case_ = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , UpperCAmelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCAmelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCAmelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) snake_case_ = actual_strassen(UpperCAmelCase , UpperCAmelCase ) # Removing the additional zeros for i in range(0 , UpperCAmelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCAmelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __UpperCamelCase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
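# --- Added editorial note (hedged; not part of the original sample) ---
# actual_strassen replaces the 8 block multiplications of the naive divide-and-conquer
# scheme with 7 recursive calls, giving O(n^log2(7)) ~= O(n^2.81) instead of O(n^3).
# strassen() first zero-pads both operands up to the next power-of-two square so the
# quadrant split always succeeds, then strips the padding rows/columns from the result.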
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict: snake_case_ = [] for old_item in old_list: snake_case_ = old_item.replace('in_layers.0' , 'norm1' ) snake_case_ = new_item.replace('in_layers.2' , 'conv1' ) snake_case_ = new_item.replace('out_layers.0' , 'norm2' ) snake_case_ = new_item.replace('out_layers.3' , 'conv2' ) snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' ) snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]: snake_case_ = [] for old_item in old_list: snake_case_ = old_item snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' ) snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' ) snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ = old_checkpoint[path] snake_case_ = old_tensor.shape[0] // 3 snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3 snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ = query.reshape(UpperCAmelCase ) snake_case_ = key.reshape(UpperCAmelCase ) snake_case_ = value.reshape(UpperCAmelCase ) for path in paths: snake_case_ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ = old_checkpoint[path['old']][:, :, 0] else: snake_case_ = old_checkpoint[path['old']] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = {} snake_case_ = checkpoint['time_embed.0.weight'] snake_case_ = checkpoint['time_embed.0.bias'] snake_case_ = checkpoint['time_embed.2.weight'] snake_case_ = checkpoint['time_embed.2.bias'] snake_case_ = checkpoint['input_blocks.0.0.weight'] snake_case_ = checkpoint['input_blocks.0.0.bias'] snake_case_ = checkpoint['out.0.weight'] snake_case_ = checkpoint['out.0.bias'] snake_case_ = checkpoint['out.2.weight'] snake_case_ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only snake_case_ = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): snake_case_ = (i - 1) // (config['num_res_blocks'] + 1) snake_case_ = (i - 1) % (config['num_res_blocks'] + 1) snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) snake_case_ = middle_blocks[0] snake_case_ = middle_blocks[1] snake_case_ = middle_blocks[2] snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): snake_case_ = i // (config['num_res_blocks'] + 1) snake_case_ = i % (config['num_res_blocks'] + 1) snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] snake_case_ = {} for layer in output_block_layers: snake_case_ , snake_case_ = layer.split('.' 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: snake_case_ = [layer_name] if len(UpperCAmelCase ) > 1: snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: snake_case_ = [] if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , ) else: snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] ) snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] ) snake_case_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __UpperCamelCase = json.loads(f.read()) __UpperCamelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __UpperCamelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: 
E722 model.save_pretrained(args.dump_path)
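
# Added usage sketch (the script filename and paths are placeholders, not from the
# original): after running the converter, the dumped artifacts can be reloaded with
# diffusers, e.g.
#
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_file config.json --dump_path ./ldm-converted
#
# from diffusers import LDMPipeline
# pipe = LDMPipeline.from_pretrained("./ldm-converted")  # works when the try-branch saved a full pipeline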
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __UpperCamelCase = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: inspect_dataset(UpperCAmelCase , UpperCAmelCase ) snake_case_ = path + '.py' assert script_name in os.listdir(UpperCAmelCase ) assert "__pycache__" not in os.listdir(UpperCAmelCase ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str: inspect_metric(UpperCAmelCase , UpperCAmelCase ) snake_case_ = path + '.py' assert script_name in os.listdir(UpperCAmelCase ) assert "__pycache__" not in os.listdir(UpperCAmelCase ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = get_dataset_config_info(UpperCAmelCase , config_name=UpperCAmelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: with pytest.raises(UpperCAmelCase ): get_dataset_config_info(UpperCAmelCase , config_name=UpperCAmelCase ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str: snake_case_ = get_dataset_config_names(UpperCAmelCase ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = get_dataset_infos(UpperCAmelCase ) assert list(infos.keys() ) == expected_configs snake_case_ = expected_configs[0] assert expected_config in infos snake_case_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: snake_case_ = get_dataset_infos(UpperCAmelCase ) assert expected_config in infos snake_case_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def 
UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: with pytest.raises(UpperCAmelCase ): get_dataset_split_names(UpperCAmelCase , config_name=UpperCAmelCase )
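
# Added sketch of the public API these tests exercise (the expected values mirror the
# parametrized cases above, so they depend on current hub metadata):
# from datasets import get_dataset_config_names, get_dataset_split_names
# get_dataset_config_names("squad")                            # -> ["plain_text"]
# get_dataset_split_names("squad", config_name="plain_text")   # -> ["train", "validation"]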
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
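
# Added inference sketch mirroring the verification step above; the hub repo id is an
# assumption about where converted weights were ultimately published:
# from transformers import CLIPProcessor, GroupViTModel
# model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=prepare_img(), padding=True, return_tensors="pt")
# probs = model(**inputs).logits_per_image.softmax(dim=1)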
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase = 100 ) -> int: snake_case_ = n * (n + 1) * (2 * n + 1) / 6 snake_case_ = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" __UpperCamelCase = tuple[float, float, float] __UpperCamelCase = tuple[float, float, float] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Vectorad: snake_case_ = end_pointa[0] - end_pointa[0] snake_case_ = end_pointa[1] - end_pointa[1] snake_case_ = end_pointa[2] - end_pointa[2] return (x, y, z) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Vectorad: snake_case_ = ab[1] * ac[2] - ab[2] * ac[1] # *i snake_case_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j snake_case_ = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: return tuple(round(UpperCAmelCase , UpperCAmelCase ) for x in vector ) == (0, 0, 0) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 10 ) -> bool: snake_case_ = create_vector(UpperCAmelCase , UpperCAmelCase ) snake_case_ = create_vector(UpperCAmelCase , UpperCAmelCase ) return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''andreasmadsen/efficient_mlm_m0.40''': ( '''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json''' ), } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "roberta-prelayernorm" def __init__( self, lowerCAmelCase__=5_0265, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=0, lowerCAmelCase__=2, lowerCAmelCase__="absolute", lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = classifier_dropout class UpperCamelCase ( lowerCAmelCase__ ): @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case_ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case_ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ])
"""simple docstring""" __UpperCamelCase = 256 # Modulus to hash a string __UpperCamelCase = 100_0003 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) if p_len > t_len: return False snake_case_ = 0 snake_case_ = 0 snake_case_ = 1 # Calculating the hash of pattern and substring of text for i in range(UpperCAmelCase ): snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: snake_case_ = 'abc1abc12' snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' snake_case_ = 'alskfjaldsk23adsfabcabc' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 2) snake_case_ = 'ABABX' snake_case_ = 'ABABZABABYABABX' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 3) snake_case_ = 'AAAB' snake_case_ = 'ABAAAAAB' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 4) snake_case_ = 'abcdabcy' snake_case_ = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 5) snake_case_ = 'Lü' snake_case_ = 'Lüsai' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'Lue' assert not rabin_karp(UpperCAmelCase , UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def UpperCAmelCase ( ) -> int: snake_case_ = { 'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'], 'path': ['test_1.py', 'test_2.py', 'unit_test.py'], 'content': ['a ' * 20, 'a ' * 30, 'b ' * 7], } snake_case_ = Dataset.from_dict(UpperCAmelCase ) return dataset class UpperCamelCase ( lowerCAmelCase__ ): def a_ ( self) -> str: snake_case_ = get_dataset() snake_case_ = make_duplicate_clusters(lowerCAmelCase__, 0.85) self.assertEqual(len(duplicate_clusters[0]), 2) def a_ ( self) -> Optional[Any]: snake_case_ = get_dataset() snake_case_ , snake_case_ = deduplicate_dataset(lowerCAmelCase__) self.assertEqual(len(lowerCAmelCase__), 2) print(lowerCAmelCase__) self.assertEqual(duplicate_clusters[0][0]['copies'], 2) self.assertEqual(duplicate_clusters[0][0]['is_extreme'], lowerCAmelCase__)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''google/fnet-base''': 512, '''google/fnet-large''': 512, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "token_type_ids"] SCREAMING_SNAKE_CASE_ = FNetTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__="<unk>", lowerCAmelCase__="[SEP]", lowerCAmelCase__="<pad>", lowerCAmelCase__="[CLS]", lowerCAmelCase__="[MASK]", **lowerCAmelCase__, ) -> int: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. snake_case_ = ( AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__, normalized=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token ) super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, remove_space=lowerCAmelCase__, keep_accents=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = do_lower_case snake_case_ = remove_space snake_case_ = keep_accents snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def a_ ( self) -> Optional[int]: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], '<pad>') self.assertEqual(vocab_keys[1], '<unk>') self.assertEqual(vocab_keys[-1], '[PAD]') self.assertEqual(len(lowerCAmelCase__), 3_0001) def a_ ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size, 3_0000) def a_ ( self) -> List[str]: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> str: pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> List[Any]: pass def a_ ( self) -> str: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[Any]: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Any: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = 'This is a test' snake_case_ = [13, 1, 4398, 25, 21, 1289] snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = DebertaVaTokenizer(lowerCAmelCase__) snake_case_ = tokenizer.encode('sequence builders') snake_case_ = tokenizer.encode('multi-sequence build') snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, ) @slow def a_ ( self) -> Union[str, Any]: # fmt: off snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
"""simple docstring""" import colorsys from PIL import Image # type: ignore def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float: snake_case_ = x snake_case_ = y for step in range(UpperCAmelCase ): # noqa: B007 snake_case_ = a * a - b * b + x snake_case_ = 2 * a * b + y snake_case_ = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def UpperCAmelCase ( UpperCAmelCase ) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def UpperCAmelCase ( UpperCAmelCase ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase , 1 , 1 ) ) def UpperCAmelCase ( UpperCAmelCase = 800 , UpperCAmelCase = 600 , UpperCAmelCase = -0.6 , UpperCAmelCase = 0 , UpperCAmelCase = 3.2 , UpperCAmelCase = 50 , UpperCAmelCase = True , ) -> Image.Image: snake_case_ = Image.new('RGB' , (image_width, image_height) ) snake_case_ = img.load() # loop through the image-coordinates for image_x in range(UpperCAmelCase ): for image_y in range(UpperCAmelCase ): # determine the figure-coordinates based on the image-coordinates snake_case_ = figure_width / image_width * image_height snake_case_ = figure_center_x + (image_x / image_width - 0.5) * figure_width snake_case_ = figure_center_y + (image_y / image_height - 0.5) * figure_height snake_case_ = get_distance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: snake_case_ = get_color_coded_rgb(UpperCAmelCase ) else: snake_case_ = get_black_and_white_rgb(UpperCAmelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure __UpperCamelCase = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {} __UpperCamelCase = {} __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]: snake_case_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) snake_case_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) snake_case_ = format_type def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]: snake_case_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): snake_case_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter: snake_case_ = get_format_type_from_alias(UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "deberta-v2" def __init__( self, lowerCAmelCase__=12_8100, lowerCAmelCase__=1536, lowerCAmelCase__=24, lowerCAmelCase__=24, lowerCAmelCase__=6144, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=0, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-7, lowerCAmelCase__=False, lowerCAmelCase__=-1, lowerCAmelCase__=0, lowerCAmelCase__=True, lowerCAmelCase__=None, lowerCAmelCase__=0, lowerCAmelCase__="gelu", **lowerCAmelCase__, ) -> List[str]: super().__init__(**lowerCAmelCase__) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = relative_attention snake_case_ = max_relative_positions snake_case_ = pad_token_id snake_case_ = position_biased_input # Backwards compatibility if type(lowerCAmelCase__) == str: snake_case_ = [x.strip() for x in pos_att_type.lower().split('|')] snake_case_ = pos_att_type snake_case_ = vocab_size snake_case_ = layer_norm_eps snake_case_ = kwargs.get('pooler_hidden_size', lowerCAmelCase__) snake_case_ = pooler_dropout snake_case_ = pooler_hidden_act class UpperCamelCase ( lowerCAmelCase__ ): @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case_ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case_ = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) @property def a_ ( self) -> int: return 12 def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = -1, lowerCAmelCase__ = -1, lowerCAmelCase__ = -1, lowerCAmelCase__ = False, lowerCAmelCase__ = None, lowerCAmelCase__ = 3, lowerCAmelCase__ = 40, lowerCAmelCase__ = 40, lowerCAmelCase__ = None, ) -> Mapping[str, Any]: snake_case_ = super().generate_dummy_inputs(preprocessor=lowerCAmelCase__, framework=lowerCAmelCase__) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
312
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = MvpTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case_ = 'post_processor' snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) if tokenizer_component_instance: snake_case_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case_ = tuple(state['sep']) if "cls" in state: snake_case_ = tuple(state['cls']) snake_case_ = False if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = add_prefix_space snake_case_ = True if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets: snake_case_ = trim_offsets snake_case_ = True if changes_to_apply: snake_case_ = getattr(lowerCAmelCase__, state.pop('type')) snake_case_ = component_class(**lowerCAmelCase__) setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) @property def a_ ( self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value snake_case_ = value def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
312
1
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file __UpperCamelCase = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def UpperCAmelCase ( UpperCAmelCase=None ) -> int: if subparsers is not None: snake_case_ = subparsers.add_parser('tpu-config' , description=_description ) else: snake_case_ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description ) # Core arguments snake_case_ = parser.add_argument_group( 'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' ) config_args.add_argument( '--config_file' , type=UpperCAmelCase , default=UpperCAmelCase , help='Path to the config file to use for accelerate.' , ) config_args.add_argument( '--tpu_name' , default=UpperCAmelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , ) config_args.add_argument( '--tpu_zone' , default=UpperCAmelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , ) snake_case_ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' ) pod_args.add_argument( '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , ) pod_args.add_argument( '--command_file' , default=UpperCAmelCase , help='The path to the file containing the commands to run on the pod on startup.' , ) pod_args.add_argument( '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , ) pod_args.add_argument( '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , ) pod_args.add_argument( '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , ) pod_args.add_argument( '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' ) if subparsers is not None: parser.set_defaults(func=UpperCAmelCase ) return parser def UpperCAmelCase ( UpperCAmelCase ) -> Tuple: snake_case_ = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(UpperCAmelCase ): snake_case_ = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: snake_case_ = defaults.command_file if not args.command and defaults.commands is not None: snake_case_ = defaults.commands if not args.tpu_name: snake_case_ = defaults.tpu_name if not args.tpu_zone: snake_case_ = defaults.tpu_zone if args.accelerate_version == "dev": snake_case_ = 'git+https://github.com/huggingface/accelerate.git' elif args.accelerate_version == "latest": snake_case_ = 'accelerate -U' elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ): snake_case_ = f'accelerate=={args.accelerate_version}' if not args.command_file and not args.command: raise ValueError('You must specify either a command file or a command to run on the pod.' ) if args.command_file: with open(args.command_file , 'r' ) as f: snake_case_ = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , UpperCAmelCase ): snake_case_ = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate snake_case_ = ['cd /usr/share'] if args.install_accelerate: new_cmd += [f'pip install {args.accelerate_version}'] new_cmd += args.command snake_case_ = '; '.join(UpperCAmelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess snake_case_ = ['gcloud'] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'Running {" ".join(UpperCAmelCase )}' ) return subprocess.run(UpperCAmelCase ) print('Successfully setup pod.' ) def UpperCAmelCase ( ) -> List[Any]: snake_case_ = tpu_command_parser() snake_case_ = parser.parse_args() tpu_command_launcher(UpperCAmelCase )
312
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase ) -> str: if isinstance(UpperCAmelCase , UpperCAmelCase ): raise TypeError('\'float\' object cannot be interpreted as an integer' ) if isinstance(UpperCAmelCase , UpperCAmelCase ): raise TypeError('\'str\' object cannot be interpreted as an integer' ) if num == 0: return "0b0" snake_case_ = False if num < 0: snake_case_ = True snake_case_ = -num snake_case_ = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(UpperCAmelCase ) for e in binary ) return "0b" + "".join(str(UpperCAmelCase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
312
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "beit" def __init__( self, lowerCAmelCase__=8192, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=224, lowerCAmelCase__=16, lowerCAmelCase__=3, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=True, lowerCAmelCase__=[3, 5, 7, 11], lowerCAmelCase__=[1, 2, 3, 6], lowerCAmelCase__=True, lowerCAmelCase__=0.4, lowerCAmelCase__=256, lowerCAmelCase__=1, lowerCAmelCase__=False, lowerCAmelCase__=255, **lowerCAmelCase__, ) -> Optional[int]: super().__init__(**lowerCAmelCase__) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-4
312
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
1
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
312
1
"""simple docstring""" from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Dict: super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: snake_case_ = ( f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`' f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ' 'to update the config accordingly as leaving `steps_offset` might led to incorrect results' ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,' ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`' ' file' ) deprecate('steps_offset!=1', '1.0.0', lowerCAmelCase__, standard_warn=lowerCAmelCase__) snake_case_ = dict(scheduler.config) snake_case_ = 1 snake_case_ = FrozenDict(lowerCAmelCase__) if hasattr(scheduler.config, 'skip_prk_steps') and scheduler.config.skip_prk_steps is False: snake_case_ = ( f'The configuration file of this scheduler: {scheduler} has not set the configuration' ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make' ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to' ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face' ' Hub, it would be very nice if you could open a Pull request for the' ' `scheduler/scheduler_config.json` file' ) deprecate('skip_prk_steps not set', '1.0.0', lowerCAmelCase__, standard_warn=lowerCAmelCase__) snake_case_ = dict(scheduler.config) snake_case_ = True snake_case_ = FrozenDict(lowerCAmelCase__) if safety_checker is None: logger.warning( f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure' ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. 
For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( segmentation_model=lowerCAmelCase__, segmentation_processor=lowerCAmelCase__, vae=lowerCAmelCase__, text_encoder=lowerCAmelCase__, tokenizer=lowerCAmelCase__, unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, ) def a_ ( self, lowerCAmelCase__ = "auto") -> Optional[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory snake_case_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__) def a_ ( self) -> List[Any]: self.enable_attention_slicing(lowerCAmelCase__) def a_ ( self) -> Dict: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`') snake_case_ = torch.device('cuda') for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(lowerCAmelCase__, lowerCAmelCase__) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def a_ ( self) -> Union[str, Any]: if self.device != torch.device('meta') or not hasattr(self.unet, '_hf_hook'): return self.device for module in self.unet.modules(): if ( hasattr(lowerCAmelCase__, '_hf_hook') and hasattr(module._hf_hook, 'execution_device') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = 512, lowerCAmelCase__ = 512, lowerCAmelCase__ = 50, lowerCAmelCase__ = 7.5, lowerCAmelCase__ = None, lowerCAmelCase__ = 1, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = "pil", lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 1, **lowerCAmelCase__, ) -> str: snake_case_ = self.segmentation_processor( text=[text], images=[image], padding='max_length', return_tensors='pt').to(self.device) snake_case_ = self.segmentation_model(**lowerCAmelCase__) snake_case_ = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy() snake_case_ = self.numpy_to_pil(lowerCAmelCase__)[0].resize(image.size) # Run inpainting pipeline with the generated mask snake_case_ = StableDiffusionInpaintPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, ) return inpainting_pipeline( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, height=lowerCAmelCase__, width=lowerCAmelCase__, num_inference_steps=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, negative_prompt=lowerCAmelCase__, num_images_per_prompt=lowerCAmelCase__, eta=lowerCAmelCase__, generator=lowerCAmelCase__, latents=lowerCAmelCase__, output_type=lowerCAmelCase__, return_dict=lowerCAmelCase__, callback=lowerCAmelCase__, callback_steps=lowerCAmelCase__, )
312
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
1
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def UpperCAmelCase ( UpperCAmelCase ) -> int: snake_case_ = prime_factors(UpperCAmelCase ) if is_square_free(UpperCAmelCase ): return -1 if len(UpperCAmelCase ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
312
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=lowerCAmelCase__ ) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) SCREAMING_SNAKE_CASE_ = Features({"audio": Audio()} ) SCREAMING_SNAKE_CASE_ = Features({"labels": ClassLabel} ) SCREAMING_SNAKE_CASE_ = "audio" SCREAMING_SNAKE_CASE_ = "labels" def a_ ( self, lowerCAmelCase__) -> Dict: if self.label_column not in features: raise ValueError(f'Column {self.label_column} is not present in features.') if not isinstance(features[self.label_column], lowerCAmelCase__): raise ValueError(f'Column {self.label_column} is not a ClassLabel.') snake_case_ = copy.deepcopy(self) snake_case_ = self.label_schema.copy() snake_case_ = features[self.label_column] snake_case_ = label_schema return task_template @property def a_ ( self) -> Dict[str, str]: return { self.audio_column: "audio", self.label_column: "labels", }
312
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1