Dataset schema (column name, dtype, observed range):

  code                     string   length 86 to 54.5k
  code_codestyle           int64    0 to 371
  style_context            string   length 87 to 49.2k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
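The rows that follow conform to this schema: each pairs a Python source sample ("code") with a second sample ("style_context"), plus two integer style codes and a binary label. As a minimal sketch, rows with this schema could be read with the Hugging Face "datasets" library; the file name below is an assumption, since this dump does not say how the rows are stored on disk:

from datasets import load_dataset

# "data.parquet" is a hypothetical file name; point it at wherever the rows live.
ds = load_dataset("parquet", data_files="data.parquet", split="train")

row = ds[0]
# Each row: two code strings, two style-code integers, and a 0/1 label.
print(row["label"], row["code"][:80])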
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict=13 , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict=99 , lowerCAmelCase__ : List[Any]=32 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : Union[str, Any]=4 , lowerCAmelCase__ : List[str]=37 , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Tuple=512 , lowerCAmelCase__ : Dict=16 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : List[str]=4 , ): SCREAMING_SNAKE_CASE_: Optional[int] = parent SCREAMING_SNAKE_CASE_: Union[str, Any] = batch_size SCREAMING_SNAKE_CASE_: Tuple = seq_length SCREAMING_SNAKE_CASE_: Optional[Any] = is_training SCREAMING_SNAKE_CASE_: int = use_attention_mask SCREAMING_SNAKE_CASE_: Optional[int] = use_token_type_ids SCREAMING_SNAKE_CASE_: Tuple = use_labels SCREAMING_SNAKE_CASE_: List[Any] = vocab_size SCREAMING_SNAKE_CASE_: Any = hidden_size SCREAMING_SNAKE_CASE_: str = num_hidden_layers SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE_: List[str] = intermediate_size SCREAMING_SNAKE_CASE_: Dict = hidden_act SCREAMING_SNAKE_CASE_: Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: Any = max_position_embeddings SCREAMING_SNAKE_CASE_: str = type_vocab_size SCREAMING_SNAKE_CASE_: Optional[int] = type_sequence_label_size SCREAMING_SNAKE_CASE_: List[str] = initializer_range SCREAMING_SNAKE_CASE_: Dict = num_choices def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_: Dict = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_: str = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_: Tuple = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_: int = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_: Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_: Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : List[Any] = True _UpperCAmelCase : str = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: Tuple = FlaxRoFormerModelTester(self) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE_: Optional[int] = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = model(np.ones((1, 1))) self.assertIsNotNone(lowerCAmelCase__) @require_flax class __lowercase ( unittest.TestCase ): """simple docstring""" @slow def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: List[str] = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") SCREAMING_SNAKE_CASE_: Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]]) SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__)[0] SCREAMING_SNAKE_CASE_: str = 5_0000 SCREAMING_SNAKE_CASE_: Optional[int] = (1, 6, vocab_size) self.assertEqual(output.shape , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4))
13
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
84
0
"""simple docstring""" from math import ceil def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : List[Any] = list(range(0 , __SCREAMING_SNAKE_CASE ) ) lowercase_ : List[Any] = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowercase_ : Optional[int] = [] for i in device_map_blocks: if device_map_blocks.count(__SCREAMING_SNAKE_CASE ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__SCREAMING_SNAKE_CASE ) # Missing blocks lowercase_ : List[Any] = [i for i in blocks if i not in device_map_blocks] lowercase_ : Optional[int] = [i for i in device_map_blocks if i not in blocks] if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( 'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.' ' These attention blocks were specified more than once: ' + str(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( 'There are attention blocks for this model that are not specified in the device_map. Add these attention ' 'blocks to a device on the device_map: ' + str(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( 'The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(__SCREAMING_SNAKE_CASE ) ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Optional[Any] = list(range(__SCREAMING_SNAKE_CASE ) ) lowercase_ : List[str] = int(ceil(n_layers / len(__SCREAMING_SNAKE_CASE ) ) ) lowercase_ : Optional[Any] = [layers[i : i + n_blocks] for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
351
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
0
"""simple docstring""" from __future__ import annotations from collections.abc import Callable def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 100, ): UpperCAmelCase_ : List[str] = x_start UpperCAmelCase_ : List[str] = fnc(snake_case_ ) UpperCAmelCase_ : Tuple = 0.0 for _ in range(snake_case_ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase_ : List[Any] = (x_end - x_start) / steps + xa UpperCAmelCase_ : Any = fnc(snake_case_ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase_ : Optional[Any] = xa UpperCAmelCase_ : Any = fxa return area if __name__ == "__main__": def __a ( __lowerCamelCase ): return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') _a = 10 while i <= 100_000: print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""") i *= 10
61
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
26
0
'''simple docstring''' import os import sys import unittest UpperCamelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) UpperCamelCase : Optional[int] = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") UpperCamelCase : Dict = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self : List[str]): """simple docstring""" a : str = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_) a : Union[str, Any] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_) a : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""} a : str = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) def SCREAMING_SNAKE_CASE_ ( self : List[str]): """simple docstring""" a : List[str] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_) a : List[str] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_) a : Union[str, Any] = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } a : int = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) def SCREAMING_SNAKE_CASE_ ( self : int): """simple docstring""" a : Any = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_) a : Optional[int] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_) a : Optional[int] = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } a : Union[str, Any] = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], 
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_)
355
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits into a 0/1 mask before converting to an image.
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
345
0
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
280
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
280
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
119
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
119
1
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
103
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' return [ord(SCREAMING_SNAKE_CASE ) - 96 for elem in plain] def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = encode(input("""-> """ ).strip().lower() ) print("""Encoded: """ , SCREAMING_SNAKE_CASE ) print("""Decoded:""" , decode(SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main()
46
0
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class a__ ( a__ ): '''simple docstring''' lowercase__ : List[Any] = ["image_processor", "tokenizer"] lowercase__ : Tuple = "BlipImageProcessor" lowercase__ : List[Any] = "AutoTokenizer" def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = False super().__init__(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = self.image_processor def __call__( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = 0 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = True , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> BatchEncoding: if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowerCAmelCase__ = self.tokenizer lowerCAmelCase__ = self.tokenizer( text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , ) return text_encoding # add pixel_values lowerCAmelCase__ = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) if text is not None: lowerCAmelCase__ = self.tokenizer( text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , ) else: lowerCAmelCase__ = None if text_encoding is not None: encoding_image_processor.update(lowerCamelCase_ ) return encoding_image_processor def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Any: return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Any: return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.tokenizer.model_input_names lowerCAmelCase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
228
'''simple docstring'''
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
228
1
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class a ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self: str ): """simple docstring""" A__ = get_activation("""swish""" ) self.assertIsInstance(UpperCamelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase ( self: Any ): """simple docstring""" A__ = get_activation("""silu""" ) self.assertIsInstance(UpperCamelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = get_activation("""mish""" ) self.assertIsInstance(UpperCamelCase , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase ( self: Any ): """simple docstring""" A__ = get_activation("""gelu""" ) self.assertIsInstance(UpperCamelCase , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
335
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class a ( unittest.TestCase ): """simple docstring""" def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ): """simple docstring""" A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ): """simple docstring""" if not batched: A__ = image_inputs[0] if isinstance(UpperCamelCase , Image.Image ): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size["""shortest_edge"""] * h / w ) A__ = self.size["""shortest_edge"""] elif w > h: A__ = self.size["""shortest_edge"""] A__ = int(self.size["""shortest_edge"""] * w / h ) else: A__ = self.size["""shortest_edge"""] A__ = self.size["""shortest_edge"""] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0] A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a ( _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = YolosImageProcessor if is_vision_available() else None def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = YolosImageProcessingTester(self ) @property def UpperCamelCase ( self: Optional[int] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """size""" ) ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 
13_33} ) self.assertEqual(image_processor.do_pad , UpperCamelCase ) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase ) def UpperCamelCase ( self: str ): """simple docstring""" pass def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = 
self.image_processing_class(**self.image_processor_dict ) A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" ) A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" ) self.assertTrue( torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) ) @slow def UpperCamelCase ( self: str ): """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: A__ = json.loads(f.read() ) A__ = {"""image_id""": 3_97_69, """annotations""": target} # encode them A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" ) A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" ) # verify pixel values A__ = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase ) A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) ) # verify area A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) ) # verify boxes A__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase ) A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) ) # verify image_id A__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) ) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) ) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) ) # verify orig_size A__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) ) # verify size A__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) ) @slow def UpperCamelCase ( self: int ): """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: A__ = json.loads(f.read() ) A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target} A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them A__ = YolosImageProcessor(format="""coco_panoptic""" ) A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" ) # verify pixel values A__ = torch.Size([1, 3, 8_00, 10_66] ) 
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase ) A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) ) # verify area A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) ) # verify boxes A__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase ) A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) ) # verify image_id A__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) ) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) ) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) ) # verify masks A__ = 82_28_73 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase ) # verify orig_size A__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) ) # verify size A__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
335
1
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __lowercase = logging.get_logger(__name__) # General docstring __lowercase = '''RegNetConfig''' # Base docstring __lowercase = '''facebook/regnet-y-040''' __lowercase = [1, 1088, 7, 7] # Image classification docstring __lowercase = '''facebook/regnet-y-040''' __lowercase = '''tabby, tabby cat''' __lowercase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase = 3 , __lowercase = 1 , __lowercase = 1 , __lowercase = "relu" , ) -> Dict: super().__init__() __UpperCamelCase :Optional[int] = nn.Convad( __lowercase , __lowercase , kernel_size=__lowercase , stride=__lowercase , padding=kernel_size // 2 , groups=__lowercase , bias=__lowercase , ) __UpperCamelCase :Tuple = nn.BatchNormad(__lowercase) __UpperCamelCase :Any = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]: __UpperCamelCase :str = self.convolution(__lowercase) __UpperCamelCase :Optional[Any] = self.normalization(__lowercase) __UpperCamelCase :List[str] = self.activation(__lowercase) return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase) -> Any: super().__init__() __UpperCamelCase :Any = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act) __UpperCamelCase :Optional[int] = config.num_channels def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]: __UpperCamelCase :str = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''') __UpperCamelCase :Any = self.embedder(__lowercase) return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase = 2) -> Optional[Any]: super().__init__() __UpperCamelCase :Dict = nn.Convad(__lowercase , __lowercase , kernel_size=1 , stride=__lowercase , bias=__lowercase) __UpperCamelCase :str = nn.BatchNormad(__lowercase) def UpperCamelCase__ ( self , __lowercase) -> Tensor: __UpperCamelCase :Any = self.convolution(__lowercase) __UpperCamelCase :Optional[Any] = self.normalization(__lowercase) return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase , __lowercase) -> int: super().__init__() __UpperCamelCase :List[str] = nn.AdaptiveAvgPoolad((1, 1)) __UpperCamelCase :Union[str, Any] = nn.Sequential( nn.Convad(__lowercase , __lowercase , kernel_size=1) , nn.ReLU() , nn.Convad(__lowercase , __lowercase , kernel_size=1) , nn.Sigmoid() , ) def UpperCamelCase__ ( self , __lowercase) -> Any: # b c h w -> b c 1 1 __UpperCamelCase :str = self.pooler(__lowercase) __UpperCamelCase :List[Any] = 
self.attention(__lowercase) __UpperCamelCase :Union[str, Any] = hidden_state * attention return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 1) -> List[str]: super().__init__() __UpperCamelCase :Tuple = in_channels != out_channels or stride != 1 __UpperCamelCase :Optional[int] = max(1 , out_channels // config.groups_width) __UpperCamelCase :Optional[int] = ( RegNetShortCut(__lowercase , __lowercase , stride=__lowercase) if should_apply_shortcut else nn.Identity() ) __UpperCamelCase :Optional[int] = nn.Sequential( RegNetConvLayer(__lowercase , __lowercase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(__lowercase , __lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act) , RegNetConvLayer(__lowercase , __lowercase , kernel_size=1 , activation=__lowercase) , ) __UpperCamelCase :Dict = ACTaFN[config.hidden_act] def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]: __UpperCamelCase :Dict = hidden_state __UpperCamelCase :int = self.layer(__lowercase) __UpperCamelCase :List[str] = self.shortcut(__lowercase) hidden_state += residual __UpperCamelCase :Any = self.activation(__lowercase) return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 1) -> Tuple: super().__init__() __UpperCamelCase :List[Any] = in_channels != out_channels or stride != 1 __UpperCamelCase :Optional[Any] = max(1 , out_channels // config.groups_width) __UpperCamelCase :List[Any] = ( RegNetShortCut(__lowercase , __lowercase , stride=__lowercase) if should_apply_shortcut else nn.Identity() ) __UpperCamelCase :List[str] = nn.Sequential( RegNetConvLayer(__lowercase , __lowercase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(__lowercase , __lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act) , RegNetSELayer(__lowercase , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(__lowercase , __lowercase , kernel_size=1 , activation=__lowercase) , ) __UpperCamelCase :List[str] = ACTaFN[config.hidden_act] def UpperCamelCase__ ( self , __lowercase) -> Any: __UpperCamelCase :List[str] = hidden_state __UpperCamelCase :str = self.layer(__lowercase) __UpperCamelCase :Optional[int] = self.shortcut(__lowercase) hidden_state += residual __UpperCamelCase :Optional[int] = self.activation(__lowercase) return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 2 , __lowercase = 2 , ) -> str: super().__init__() __UpperCamelCase :Tuple = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer __UpperCamelCase :Any = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( __lowercase , __lowercase , __lowercase , stride=__lowercase , ) , *[layer(__lowercase , __lowercase , __lowercase) for _ in range(depth - 1)] , ) def UpperCamelCase__ ( self , __lowercase) -> Dict: __UpperCamelCase :str = self.layers(__lowercase) return hidden_state class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __lowercase) -> Tuple: super().__init__() __UpperCamelCase :int = nn.ModuleList([]) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( __lowercase , config.embedding_size , config.hidden_sizes[0] , 
stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , )) __UpperCamelCase :List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(__lowercase , config.depths[1:]): self.stages.append(RegNetStage(__lowercase , __lowercase , __lowercase , depth=__lowercase)) def UpperCamelCase__ ( self , __lowercase , __lowercase = False , __lowercase = True) -> BaseModelOutputWithNoAttention: __UpperCamelCase :Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __UpperCamelCase :List[str] = hidden_states + (hidden_state,) __UpperCamelCase :Optional[Any] = stage_module(__lowercase) if output_hidden_states: __UpperCamelCase :List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=__lowercase , hidden_states=__lowercase) class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : Optional[int] = RegNetConfig a__ : Dict = """regnet""" a__ : Any = """pixel_values""" a__ : Optional[Any] = True def UpperCamelCase__ ( self , __lowercase) -> List[str]: if isinstance(__lowercase , nn.Convad): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''') elif isinstance(__lowercase , (nn.BatchNormad, nn.GroupNorm)): nn.init.constant_(module.weight , 1) nn.init.constant_(module.bias , 0) def UpperCamelCase__ ( self , __lowercase , __lowercase=False) -> int: if isinstance(__lowercase , __lowercase): __UpperCamelCase :Any = value __lowercase = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __lowercase = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" , UpperCAmelCase_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self , __lowercase) -> Optional[int]: super().__init__(__lowercase) __UpperCamelCase :str = config __UpperCamelCase :str = RegNetEmbeddings(__lowercase) __UpperCamelCase :Optional[Any] = RegNetEncoder(__lowercase) __UpperCamelCase :Tuple = nn.AdaptiveAvgPoolad((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowercase) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None) -> BaseModelOutputWithPoolingAndNoAttention: __UpperCamelCase :int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase :Tuple = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase :str = self.embedder(__lowercase) __UpperCamelCase :List[str] = self.encoder( __lowercase , output_hidden_states=__lowercase , return_dict=__lowercase) __UpperCamelCase :Union[str, Any] = encoder_outputs[0] __UpperCamelCase :str = self.pooler(__lowercase) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , UpperCAmelCase_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self , __lowercase) -> Union[str, Any]: super().__init__(__lowercase) __UpperCamelCase :int = config.num_labels __UpperCamelCase :str = RegNetModel(__lowercase) # classification head __UpperCamelCase :Any = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowercase) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase__ ( self , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> ImageClassifierOutputWithNoAttention: __UpperCamelCase :Tuple = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase :Any = self.regnet(__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase) __UpperCamelCase :Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] __UpperCamelCase :List[Any] = self.classifier(__lowercase) __UpperCamelCase :str = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __UpperCamelCase :Dict = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __UpperCamelCase :Union[str, Any] = '''single_label_classification''' else: __UpperCamelCase :Any = '''multi_label_classification''' if self.config.problem_type == "regression": __UpperCamelCase :List[str] = MSELoss() if self.num_labels == 1: __UpperCamelCase :Any = loss_fct(logits.squeeze() , labels.squeeze()) else: __UpperCamelCase :str = loss_fct(__lowercase , __lowercase) elif self.config.problem_type == "single_label_classification": __UpperCamelCase :List[str] = CrossEntropyLoss() __UpperCamelCase :Optional[Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1)) elif self.config.problem_type == "multi_label_classification": __UpperCamelCase :Optional[int] = BCEWithLogitsLoss() __UpperCamelCase :List[str] = loss_fct(__lowercase , __lowercase) if not return_dict: __UpperCamelCase :List[str] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states)
105
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self) -> int: __UpperCamelCase :str = 0 def UpperCamelCase__ ( self) -> Optional[Any]: __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''') self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :int = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :Dict = Path(__lowercase) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) __UpperCamelCase :Union[str, Any] = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Union[str, Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :str = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :Union[str, Any] = Path(__lowercase) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :int = CLIPConfig() # Create a dummy config file with image_proceesor_type __UpperCamelCase :Tuple = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :Optional[Any] = Path(__lowercase) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained(__lowercase).to_dict() config_dict.pop('''image_processor_type''') __UpperCamelCase :List[str] = CLIPImageProcessor(**__lowercase) # save in new folder model_config.save_pretrained(__lowercase) config.save_pretrained(__lowercase) __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained(__lowercase) # make sure private variable is not incorrectly saved __UpperCamelCase :Union[str, Any] = json.loads(config.to_json_string()) self.assertTrue('''_processor_class''' not in dict_as_saved) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> List[str]: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :Tuple = Path(__lowercase) / '''preprocessor_config.json''' json.dump( 
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) __UpperCamelCase :List[str] = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Optional[int]: with self.assertRaisesRegex( __lowercase , '''clip-base is not a local folder and is not a valid model identifier'''): __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained('''clip-base''') def UpperCamelCase__ ( self) -> List[Any]: with self.assertRaisesRegex( __lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''): __UpperCamelCase :str = AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''') def UpperCamelCase__ ( self) -> List[str]: with self.assertRaisesRegex( __lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''') def UpperCamelCase__ ( self) -> str: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowercase): __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') # If remote code is disabled, we can't load this config. with self.assertRaises(__lowercase): __UpperCamelCase :List[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowercase) __UpperCamelCase :List[Any] = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''') def UpperCamelCase__ ( self) -> Optional[Any]: try: AutoConfig.register('''custom''' , __lowercase) AutoImageProcessor.register(__lowercase , __lowercase) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowercase): AutoImageProcessor.register(__lowercase , __lowercase) with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :int = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :List[str] = Path(__lowercase) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) __UpperCamelCase :int = CustomImageProcessor.from_pretrained(__lowercase) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowercase) __UpperCamelCase :int = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self) -> List[Any]: class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : List[str] = True try: AutoConfig.register('''custom''' , __lowercase) AutoImageProcessor.register(__lowercase , __lowercase) # If remote code is not set, the default is to use local __UpperCamelCase :str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote code is disabled, we load the local one. __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote is enabled, we load from the Hub __UpperCamelCase :List[str] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(not hasattr(__lowercase , '''is_local''')) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
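# The register/unregister tests above follow the standard custom-class pattern. A
# minimal sketch, with MyConfig/MyImageProcessor as hypothetical stand-ins for the
# test's CustomConfig/CustomImageProcessor:
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # must match the string given to AutoConfig.register

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# AutoImageProcessor now resolves MyImageProcessor for any MyConfig-based model.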
105
1
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants __lowercase : Union[str, Any] = Mapping[str, np.ndarray] __lowercase : Tuple = Mapping[str, Any] # Is a nested dict. __lowercase : Any = 0.01 @dataclasses.dataclass(frozen=lowerCAmelCase_ ) class __UpperCamelCase : A_ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. A_ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. A_ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. A_ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. A_ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions A_ = None # Optional remark about the protein. Included as a comment in output PDB # files A_ = None # Templates used to generate this protein (prediction-only) A_ = None # Chain corresponding to each parent A_ = None def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[str] = r'(\[[A-Z]+\]\n)' __a : List[str] = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0] __a : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] ) __a : List[str] = ["N", "CA", "C"] __a : Dict = None __a : Optional[int] = None __a : Optional[Any] = None for g in groups: if "[PRIMARY]" == g[0]: __a : str = g[1][0].strip() for i in range(len(_SCREAMING_SNAKE_CASE ) ): if seq[i] not in residue_constants.restypes: __a : Union[str, Any] = 'X' # FIXME: strings are immutable __a : List[Any] = np.array( [residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __a : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) ) __a : List[Any] = np.array(_SCREAMING_SNAKE_CASE ) __a : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): __a : Dict = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __a : int = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) ) __a : str = np.zeros( ( len(_SCREAMING_SNAKE_CASE ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : int = 0 ): __a : List[str] = [] __a : Any = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) __a : Any = prot.parents __a : Tuple = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __a : Tuple = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id] if parents is None or 
len(_SCREAMING_SNAKE_CASE ) == 0: __a : List[str] = ['N/A'] pdb_headers.append(F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" ) return pdb_headers def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : str ): __a : List[str] = [] __a : List[str] = pdb_str.split('\n' ) __a : List[Any] = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) __a : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: __a : List[Any] = [] if prot.parents_chain_index is not None: __a : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] ) parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE ) __a : Any = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __a : str = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['N/A'] ) parents_per_chain.append(_SCREAMING_SNAKE_CASE ) else: parents_per_chain.append(list(prot.parents ) ) else: __a : List[str] = [['N/A']] def make_parent_line(_SCREAMING_SNAKE_CASE : Sequence[str] ) -> str: return F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __a : int = 0 for i, l in enumerate(_SCREAMING_SNAKE_CASE ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_SCREAMING_SNAKE_CASE ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_SCREAMING_SNAKE_CASE ): __a : Optional[int] = parents_per_chain[chain_counter] else: __a : int = ['N/A'] out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) ) return "\n".join(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein ): __a : Union[str, Any] = residue_constants.restypes + ['X'] def res_atoa(_SCREAMING_SNAKE_CASE : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , 'UNK' ) __a : Any = residue_constants.atom_types __a : List[str] = [] __a : str = prot.atom_mask __a : str = prot.aatype __a : str = prot.atom_positions __a : Union[str, Any] = prot.residue_index.astype(np.intaa ) __a : int = prot.b_factors __a : Tuple = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('Invalid aatypes.' ) __a : Tuple = get_pdb_headers(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: pdb_lines.extend(_SCREAMING_SNAKE_CASE ) __a : List[Any] = aatype.shape[0] __a : Any = 1 __a : List[Any] = 0 __a : Optional[Any] = string.ascii_uppercase __a : Any = None # Add all atom sites. for i in range(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __a : int = 'ATOM' __a : Tuple = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}""" __a : Union[str, Any] = '' __a : Tuple = '' __a : str = 1.0_0 __a : Union[str, Any] = atom_name[0] # Protein supports only C, N, O, S, this works. __a : str = '' __a : List[Any] = 'A' if chain_index is not None: __a : Any = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
__a : int = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 __a : Union[str, Any] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __a : Tuple = True __a : Dict = chain_index[i + 1] if should_terminate: # Close the chain. __a : int = 'TER' __a : List[Any] = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) pdb_lines.append('END' ) pdb_lines.append('' ) return "\n".join(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein ): return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def lowerCamelCase (_SCREAMING_SNAKE_CASE : FeatureDict , _SCREAMING_SNAKE_CASE : ModelOutput , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None , ): return Protein( aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
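# "PDB is a columnar format, every space matters here": a sketch of one fixed-width
# ATOM record in the column layout the writer above emits. Field values are
# illustrative and the spacer widths follow the standard 80-column PDB layout.
atom_name = "CA"
name = atom_name if len(atom_name) == 4 else f" {atom_name}"  # short names get a leading pad
line = (
    f"{'ATOM':<6}{1:>5} {name:<4}{'':>1}"          # record type, serial, atom name, altLoc
    f"{'MET':>3} {'A':>1}"                         # residue name, chain tag
    f"{1:>4}{'':>1}   "                            # residue number, insertion code
    f"{11.104:>8.3f}{6.134:>8.3f}{-0.624:>8.3f}"   # x, y, z coordinates
    f"{1.00:>6.2f}{0.00:>6.2f}          "          # occupancy, B-factor
    f"{'C':>2}{'':>2}"                             # element, charge
)
assert len(line) == 80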
27
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = -1 UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : Optional[int] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] ) UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() UpperCAmelCase_ : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :] UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: str ) -> str: # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = -1 UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n" UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = -1 UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 ) UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = """""" for new_text in streamer: streamer_text += new_text
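# The TextIteratorStreamer tests above mirror the intended usage: generation runs in
# a worker thread while the main thread consumes decoded text chunks. A minimal
# sketch with the same tiny test checkpoint:
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
thread = Thread(
    target=model.generate,
    kwargs=dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer),
)
thread.start()
for chunk in streamer:  # yields text as soon as tokens are generated
    print(chunk, end="", flush=True)
thread.join()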
345
0
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __lowercase ( unittest.TestCase ): '''simple docstring''' def _lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _lowerCamelCase ( self ): __a , __a : List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __a : Union[str, Any] = '''A painting of a squirrel eating a burger''' __a : int = jax.device_count() __a : List[Any] = num_samples * [prompt] __a : Union[str, Any] = sd_pipe.prepare_inputs(_UpperCAmelCase ) __a : Optional[int] = replicate(_UpperCAmelCase ) __a : List[str] = shard(_UpperCAmelCase ) __a : Dict = jax.random.PRNGKey(0 ) __a : List[str] = jax.random.split(_UpperCAmelCase , jax.device_count() ) __a : Tuple = sd_pipe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_inference_steps=25 , jit=_UpperCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __a : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a : str = images[0, 253:256, 253:256, -1] __a : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a : Tuple = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _lowerCamelCase ( self ): __a : Optional[int] = '''stabilityai/stable-diffusion-2''' __a , __a : Optional[int] = FlaxDPMSolverMultistepScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) __a , __a : int = FlaxStableDiffusionPipeline.from_pretrained( _UpperCAmelCase , scheduler=_UpperCAmelCase , revision='''bf16''' , dtype=jnp.bfloataa , ) __a : Dict = scheduler_params __a : Optional[Any] = '''A painting of a squirrel eating a burger''' __a : List[str] = jax.device_count() __a : Optional[int] = num_samples * [prompt] __a : Optional[int] = sd_pipe.prepare_inputs(_UpperCAmelCase ) __a : List[str] = replicate(_UpperCAmelCase ) __a : Dict = shard(_UpperCAmelCase ) __a : Dict = jax.random.PRNGKey(0 ) __a : Optional[int] = jax.random.split(_UpperCAmelCase , jax.device_count() ) __a : Union[str, Any] = sd_pipe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_inference_steps=25 , jit=_UpperCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __a : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a : str = images[0, 253:256, 253:256, -1] __a : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a : Any = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
188
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
188
1
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def UpperCamelCase ( snake_case__ : Any , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , ) -> str: UpperCamelCase : Optional[Any] = { '7z': (seven_zip_file, SevenZipExtractor), 'bz2': (bza_file, BzipaExtractor), 'gzip': (gz_file, GzipExtractor), 'lz4': (lza_file, LzaExtractor), 'tar': (tar_file, TarExtractor), 'xz': (xz_file, XzExtractor), 'zip': (zip_file, ZipExtractor), 'zstd': (zstd_file, ZstdExtractor), } UpperCamelCase , UpperCamelCase : Dict = input_paths_and_base_extractors[compression_format] if input_path is None: UpperCamelCase : List[str] = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) UpperCamelCase : int = tmp_path / ('extracted' if is_archive else 'extracted.txt') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCamelCase : List[str] = file_path.read_text(encoding='utf-8' ) else: UpperCamelCase : Optional[Any] = output_path.read_text(encoding='utf-8' ) UpperCamelCase : List[Any] = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : int , ) -> List[Any]: UpperCamelCase : Optional[int] = { '7z': seven_zip_file, 'bz2': bza_file, 'gzip': gz_file, 'lz4': lza_file, 'tar': tar_file, 'xz': xz_file, 'zip': zip_file, 'zstd': zstd_file, } UpperCamelCase : Optional[int] = input_paths[compression_format] if input_path is None: UpperCamelCase : Optional[Any] = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) UpperCamelCase : Union[str, Any] = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None UpperCamelCase : List[Any] = tmp_path / ('extracted' if is_archive else 'extracted.txt') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) 
if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCamelCase : List[str] = file_path.read_text(encoding='utf-8' ) else: UpperCamelCase : Union[str, Any] = output_path.read_text(encoding='utf-8' ) UpperCamelCase : int = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.fixture def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : str ) -> List[str]: import tarfile UpperCamelCase : Optional[Any] = tmp_path / 'data_dot_dot' directory.mkdir() UpperCamelCase : Optional[int] = directory / 'tar_file_with_dot_dot.tar' with tarfile.TarFile(snake_case__ , 'w' ) as f: f.add(snake_case__ , arcname=os.path.join('..' , text_file.name ) ) return path @pytest.fixture def UpperCamelCase ( snake_case__ : Any ) -> Tuple: import tarfile UpperCamelCase : Dict = tmp_path / 'data_sym_link' directory.mkdir() UpperCamelCase : Tuple = directory / 'tar_file_with_sym_link.tar' os.symlink('..' , directory / 'subdir' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , 'w' ) as f: f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( 'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , ) def UpperCamelCase ( snake_case__ : str , snake_case__ : int , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ) -> Tuple: UpperCamelCase : Optional[int] = { 'tar_file_with_dot_dot': tar_file_with_dot_dot, 'tar_file_with_sym_link': tar_file_with_sym_link, } UpperCamelCase : Optional[int] = insecure_tar_files[insecure_tar_file] UpperCamelCase : List[Any] = tmp_path / 'extracted' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def UpperCamelCase ( snake_case__ : List[str] ) -> str: # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number UpperCamelCase : Optional[Any] = tmpdir / 'not_a_zip_file' # From: https://github.com/python/cpython/pull/5053 UpperCamelCase : int = ( B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00' B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I' B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07' B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82' ) with not_a_zip_file.open('wb' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
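# The final test above contrasts magic-number detection with zipfile.is_zipfile,
# which scans for an end-of-central-directory record and can report false
# positives. A sketch of the magic-number idea (hypothetical helper, not the
# library's implementation):
def looks_like_zip(path):
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"  # local-file-header signature of real ZIP archives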
119
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''', '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''', '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''', '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''', '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''', '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''', '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''', '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''', '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''', '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''', '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''', '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''', } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : List[str] = "codegen" UpperCAmelCase__ : str = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, SCREAMING_SNAKE_CASE_=5_0400, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=4096, SCREAMING_SNAKE_CASE_=28, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="gelu_new", SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Tuple: UpperCamelCase : Tuple = vocab_size UpperCamelCase : Optional[int] = n_ctx UpperCamelCase : Optional[int] = n_positions UpperCamelCase : List[str] = n_embd UpperCamelCase : Dict = n_layer UpperCamelCase : int = n_head UpperCamelCase : Union[str, Any] = n_inner UpperCamelCase : int = rotary_dim UpperCamelCase : Optional[Any] = activation_function UpperCamelCase : Optional[int] = resid_pdrop UpperCamelCase : Union[str, Any] = embd_pdrop UpperCamelCase : Optional[Any] = attn_pdrop UpperCamelCase : List[str] = layer_norm_epsilon UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : str = use_cache UpperCamelCase : Dict = bos_token_id UpperCamelCase : Union[str, Any] = eos_token_id super().__init__( bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, tie_word_embeddings=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_ ( a__ ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "default", SCREAMING_SNAKE_CASE_ = 
None, SCREAMING_SNAKE_CASE_ = False, ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_, task=SCREAMING_SNAKE_CASE_, patching_specs=SCREAMING_SNAKE_CASE_, use_past=SCREAMING_SNAKE_CASE_ ) if not getattr(self._config, 'pad_token_id', SCREAMING_SNAKE_CASE_ ): # TODO: how to do that better? UpperCamelCase : str = 0 @property def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]: UpperCamelCase : Tuple = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_, direction='inputs' ) UpperCamelCase : List[Any] = {0: 'batch', 1: 'past_sequence + sequence'} else: UpperCamelCase : Optional[int] = {0: 'batch', 1: 'sequence'} return common_inputs @property def snake_case_ ( self ) -> int: return self._config.n_layer @property def snake_case_ ( self ) -> int: return self._config.n_head def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = -1, SCREAMING_SNAKE_CASE_ = -1, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, ) -> Mapping[str, Any]: UpperCamelCase : Tuple = super(SCREAMING_SNAKE_CASE_, self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE_, batch_size=SCREAMING_SNAKE_CASE_, seq_length=SCREAMING_SNAKE_CASE_, is_pair=SCREAMING_SNAKE_CASE_, framework=SCREAMING_SNAKE_CASE_ ) # We need to order the input in the way they appears in the forward() UpperCamelCase : Optional[int] = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch UpperCamelCase , UpperCamelCase : List[str] = common_inputs['input_ids'].shape # Not using the same length for past_key_values UpperCamelCase : List[Any] = seqlen + 2 UpperCamelCase : List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCamelCase : str = [ (torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers ) ] UpperCamelCase : List[Any] = common_inputs['attention_mask'] if self.use_past: UpperCamelCase : Optional[Any] = ordered_inputs['attention_mask'].dtype UpperCamelCase : List[Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_ )], dim=1 ) return ordered_inputs @property def snake_case_ ( self ) -> int: return 13
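# Shape sketch for the dummy past_key_values built above: one (key, value) pair per
# layer, each of shape (batch, num_heads, past_len, head_dim), where head_dim is
# hidden_size // num_heads and past_len = seqlen + 2. Numbers are illustrative:
import torch

batch, num_heads, past_len, head_dim = 2, 16, 7, 64
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(28)  # one pair per transformer layer (n_layer defaults to 28 above)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 7, 64])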
119
1
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer __snake_case : Any = logging.get_logger(__name__) __snake_case : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __snake_case : Optional[int] = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } __snake_case : Union[str, Any] = { "Salesforce/codegen-350M-mono": 20_48, } class __SCREAMING_SNAKE_CASE ( __lowerCAmelCase): _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Any = ["input_ids", "attention_mask"] _SCREAMING_SNAKE_CASE : Dict = CodeGenTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase=False , **_UpperCamelCase , ): """simple docstring""" super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , ) if kwargs.pop('add_bos_token' , lowerCamelCase__ ): lowerCAmelCase__ = kwargs.pop('name_or_path' , '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space: lowerCAmelCase__ = getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) ) lowerCAmelCase__ = add_prefix_space lowerCAmelCase__ = pre_tok_class(**lowerCamelCase__ ) lowerCAmelCase__ = add_prefix_space def UpperCamelCase__ ( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = kwargs.get('is_split_into_words' , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase__ ( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = kwargs.get('is_split_into_words' , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" lowerCAmelCase__ = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" lowerCAmelCase__ = super().decode( token_ids=lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ , **lowerCamelCase__ , ) if truncate_before_pattern is not None and len(lowerCamelCase__ ) > 0: lowerCAmelCase__ = self.truncate(lowerCamelCase__ , lowerCamelCase__ ) return decoded_text def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" def find_re(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): lowerCAmelCase__ = pattern.search(lowerCamelCase__ , lowerCamelCase__ ) return m.start() if m else -1 lowerCAmelCase__ = [re.compile(lowerCamelCase__ , re.MULTILINE ) for pattern in truncate_before_pattern] lowerCAmelCase__ = list(re.finditer('^print' , lowerCamelCase__ , re.MULTILINE ) ) if len(lowerCamelCase__ ) > 1: lowerCAmelCase__ = completion[: prints[1].start()] lowerCAmelCase__ = list(re.finditer('^def' , lowerCamelCase__ , re.MULTILINE ) ) if len(lowerCamelCase__ ) > 1: lowerCAmelCase__ = completion[: defs[1].start()] lowerCAmelCase__ = 0 lowerCAmelCase__ = [ pos for pos in [find_re(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for terminal in terminals] if pos != -1 ] if len(lowerCamelCase__ ) > 0: return completion[: min(lowerCamelCase__ )] else: return completion
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available, so the modeling objects can be exposed as well.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Lazily populate this module so heavy dependencies are only imported on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __UpperCamelCase : Dict = None __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __UpperCamelCase : int = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } __UpperCamelCase : Tuple = { "camembert-base": 512, } __UpperCamelCase : Dict = "▁" class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] UpperCamelCase__ = CamembertTokenizer def __init__( self :Dict , __magic_name__ :int=None , __magic_name__ :Optional[int]=None , __magic_name__ :List[str]="<s>" , __magic_name__ :Tuple="</s>" , __magic_name__ :str="</s>" , __magic_name__ :Optional[int]="<s>" , __magic_name__ :Dict="<unk>" , __magic_name__ :Tuple="<pad>" , __magic_name__ :Dict="<mask>" , __magic_name__ :Dict=["<s>NOTUSED", "</s>NOTUSED"] , **__magic_name__ :str , ): '''simple docstring''' a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( __magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) a = vocab_file a = False if not self.vocab_file else True def lowerCamelCase__ ( self :List[str] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a = [self.cls_token_id] a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self :Dict , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ): '''simple docstring''' a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self :Tuple , __magic_name__ :str , __magic_name__ :Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__magic_name__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return a = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ): copyfile(self.vocab_file , __magic_name__ ) return (out_vocab_file,)
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = CLIPTokenizer UpperCamelCase__ = CLIPTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = {} UpperCamelCase__ = False def lowerCamelCase__ ( self :str ): '''simple docstring''' super().setUp() # fmt: off a = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) a = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""] a = {"""unk_token""": """<unk>"""} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__magic_name__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__magic_name__ ) ) def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Union[str, Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def lowerCamelCase__ ( self :int , **__magic_name__ :Optional[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ ) def lowerCamelCase__ ( self :int , __magic_name__ :List[str] ): '''simple docstring''' a = """lower newer""" a = """lower newer""" return input_text, output_text def lowerCamelCase__ ( self :int ): '''simple docstring''' a = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = """lower newer""" a = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""] a = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) a = tokens + [tokenizer.unk_token] a = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) @require_ftfy def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): a = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) a = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d.""" a = tokenizer_s.tokenize(__magic_name__ ) a = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a = """xa\u0303y""" + """ """ + """x\xe3y""" a = tokenizer_s.tokenize(__magic_name__ ) a = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that the tokenization is identical on unicode of space type a = [ """\u0009""", # (horizontal tab, 
'\t') """\u000B""", # (vertical tab) """\u000C""", # (form feed) """\u0020""", # (space, ' ') """\u200E""", # (left-to-right mark):w """\u200F""", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a = tokenizer_s.tokenize(__magic_name__ ) a = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that the tokenization is identical on unicode of line break type a = [ """\u000A""", # (line feed, '\n') """\r\n""", # (carriage return and line feed, '\r\n') """\u000D""", # (carriage return, '\r') """\r""", # (carriage return, '\r') """\u000D""", # (carriage return, '\r') """\u2028""", # (line separator) """\u2029""", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a = tokenizer_s.tokenize(__magic_name__ ) a = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): a = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` a = F'{text_of_1_token} {text_of_1_token}' a = self.rust_tokenizer_class.from_pretrained( __magic_name__ , use_fast=__magic_name__ , ) a = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , ) a = F' {text}' a = self.rust_tokenizer_class.from_pretrained( __magic_name__ , use_fast=__magic_name__ , ) a = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' with self.assertRaises(__magic_name__ ) as context: self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" ) self.assertTrue( context.exception.args[0].startswith( """The `backend_tokenizer` provided does not match the expected format.""" ) ) @require_ftfy def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' super().test_tokenization_python_rust_equals() def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' pass
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar _UpperCamelCase : str = TypeVar('T') class snake_case ( Generic[T] ): def __init__( self : Optional[int] , A : List[Any] ): '''simple docstring''' a : Dict = data a : Node[T] | None = None def __str__( self : Dict ): '''simple docstring''' return F'''{self.data}''' class snake_case ( Generic[T] ): def __init__( self : Dict ): '''simple docstring''' a : Node[T] | None = None def __iter__( self : List[str] ): '''simple docstring''' a : int = self.top while node: yield node.data a : List[str] = node.next def __str__( self : str ): '''simple docstring''' return "->".join([str(snake_case__ ) for item in self] ) def __len__( self : Tuple ): '''simple docstring''' return len(tuple(iter(self ) ) ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.top is None def lowerCamelCase__ ( self : Any , A : List[Any] ): '''simple docstring''' a : List[str] = Node(snake_case__ ) if not self.is_empty(): a : Union[str, Any] = self.top a : Optional[Any] = node def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , snake_case__ ) a : List[Any] = self.top a : Dict = self.top.next return pop_node.data def lowerCamelCase__ ( self : Dict ): '''simple docstring''' if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCamelCase__ ( self : int ): '''simple docstring''' a : List[Any] = None if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" _UpperCamelCase : List[Any] = 8.31_44_62 # Unit - J mol-1 K-1 def snake_case (A_ :float , A_ :float , A_ :float ): '''simple docstring''' if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def snake_case (A_ :float , A_ :float , A_ :float ): '''simple docstring''' if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int a : Optional[Any] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): lowerCamelCase : Optional[datasets.Features] =None def _SCREAMING_SNAKE_CASE ( _lowercase : "pyspark.sql.DataFrame" , _lowercase : List[int] , ) ->Union[str, Any]: '''simple docstring''' import pyspark def generate_fn(): a : int = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) ) for partition_id in partition_order: a : Optional[int] = df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" ) a : Dict = partition_df.collect() a : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class __UpperCamelCase ( _BaseExamplesIterable ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=None , ) -> List[str]: a : int = df a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() ) a : Dict = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ) -> Dict: yield from self.generate_examples_fn() def __a ( self , lowerCAmelCase__ ) -> "SparkExamplesIterable": a : Dict = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowerCAmelCase__ ) return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> "SparkExamplesIterable": a : Optional[Any] = self.split_shard_indices_by_worker(lowerCAmelCase__ , lowerCAmelCase__ ) return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase__ ) @property def __a ( self ) -> int: return len(self.partition_order ) class __UpperCamelCase ( datasets.DatasetBuilder ): lowerCamelCase : Dict =SparkConfig def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Tuple: import pyspark a : List[str] = pyspark.sql.SparkSession.builder.getOrCreate() a : List[Any] = df a : Tuple = working_dir super().__init__( cache_dir=lowerCAmelCase__ , config_name=str(self.df.semanticHash() ) , **lowerCAmelCase__ , ) def __a ( self ) -> Tuple: # Returns the path of the created file. def create_cache_and_write_probe(lowerCAmelCase__ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowerCAmelCase__ ) a : Dict = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowerCAmelCase__ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: a : Union[str, Any] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCAmelCase__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def __a ( self ) -> str: return datasets.DatasetInfo(features=self.config.features ) def __a ( self , lowerCAmelCase__ ) -> Dict: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __a ( self , lowerCAmelCase__ ) -> List[str]: import pyspark def get_arrow_batch_size(lowerCAmelCase__ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) a : Dict = self.df.count() a : str = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. a : Optional[Any] = ( self.df.limit(lowerCAmelCase__ ) .repartition(1 ) .mapInArrow(lowerCAmelCase__ , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) a : Any = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. a : Any = min(lowerCAmelCase__ , int(approx_total_size / max_shard_size ) ) a : Union[str, Any] = self.df.repartition(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark a : List[Any] = ParquetWriter if file_format == "parquet" else ArrowWriter a : str = os.path.join(self._working_dir , os.path.basename(lowerCAmelCase__ ) ) if self._working_dir else fpath a : List[Any] = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. a : Optional[Any] = self.config.features a : Tuple = self._writer_batch_size a : Union[str, Any] = self._fs.storage_options def write_arrow(lowerCAmelCase__ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. a : Optional[int] = pyspark.TaskContext().taskAttemptId() a : Dict = next(lowerCAmelCase__ , lowerCAmelCase__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) a : List[str] = 0 a : Optional[int] = writer_class( features=lowerCAmelCase__ , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=lowerCAmelCase__ , storage_options=lowerCAmelCase__ , embed_local_files=lowerCAmelCase__ , ) a : int = pa.Table.from_batches([first_batch] ) writer.write_table(lowerCAmelCase__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: a, a : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 a : List[str] = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=lowerCAmelCase__ , storage_options=lowerCAmelCase__ , embed_local_files=lowerCAmelCase__ , ) a : List[str] = pa.Table.from_batches([batch] ) writer.write_table(lowerCAmelCase__ ) if writer._num_bytes > 0: a, a : Any = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowerCAmelCase__ ) ): a : Union[str, Any] = os.path.join(os.path.dirname(lowerCAmelCase__ ) , os.path.basename(lowerCAmelCase__ ) ) shutil.move(lowerCAmelCase__ , lowerCAmelCase__ ) a : Any = ( self.df.mapInArrow(lowerCAmelCase__ , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "arrow" , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]: self._validate_cache_dir() a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowerCAmelCase__ ) a : Optional[int] = not is_remote_filesystem(self._fs ) a : List[str] = os.path.join if is_local else posixpath.join a : Optional[Any] = "-TTTTT-SSSSS-of-NNNNN" a : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" a : int = path_join(self._output_dir , lowerCAmelCase__ ) a : List[Any] = 0 a : Optional[Any] = 0 a : List[Any] = 0 a : int = [] a : List[Any] = [] for task_id, content in self._prepare_split_single(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): ( ( a ), ( a ), ( a ), ( a ), ) : Optional[int] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowerCAmelCase__ ) a : Tuple = total_num_examples a : List[Any] = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: a : List[str] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to 
pickling the SparkContext. a : List[Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): rename( lowerCAmelCase__ , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , ) a : Dict = [] a : List[Any] = 0 for i in range(len(lowerCAmelCase__ ) ): a, a : Any = task_id_and_num_shards[i] for shard_id in range(lowerCAmelCase__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowerCAmelCase__ , len(lowerCAmelCase__ ) ).map(lambda lowerCAmelCase__ : _rename_shard(*lowerCAmelCase__ ) ).collect() else: # don't use any pattern a : Optional[Any] = 0 a : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(lowerCAmelCase__ , "" ) , ) def __a ( self , lowerCAmelCase__ , ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df )
"""simple docstring""" from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("socket.socket" ) @patch("builtins.open" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : int ) ->str: '''simple docstring''' a : Optional[Any] = Mock() a : Dict = conn, Mock() a : Union[str, Any] = iter([1, None] ) a : Optional[int] = lambda _lowercase : next(_lowercase ) # ===== invoke ===== send_file(filename="mytext.txt" , testing=_lowercase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json', 'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class __snake_case ( lowerCamelCase__ ): """simple docstring""" _lowerCamelCase = """mobilenet_v1""" def __init__( self , __lowerCamelCase=3 , __lowerCamelCase=224 , __lowerCamelCase=1.0 , __lowerCamelCase=8 , __lowerCamelCase="relu6" , __lowerCamelCase=True , __lowerCamelCase=0.9_9_9 , __lowerCamelCase=0.0_2 , __lowerCamelCase=0.0_0_1 , **__lowerCamelCase , ): '''simple docstring''' super().__init__(**__snake_case ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) __A : List[Any] = num_channels __A : List[str] = image_size __A : Union[str, Any] = depth_multiplier __A : int = min_depth __A : Optional[Any] = hidden_act __A : Tuple = tf_padding __A : Union[str, Any] = classifier_dropout_prob __A : str = initializer_range __A : Dict = layer_norm_eps class __snake_case ( lowerCamelCase__ ): """simple docstring""" _lowerCamelCase = version.parse("""1.11""" ) @property def UpperCamelCase__( self ): '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def UpperCamelCase__( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def UpperCamelCase__( self ): '''simple docstring''' return 1e-4
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE__ ) class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , **__lowerCamelCase ): '''simple docstring''' super().__init__(**__lowerCamelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , __lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' return super().__call__(__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase__( self , **__lowerCamelCase ): '''simple docstring''' __A : Union[str, Any] = {} if "candidate_labels" in kwargs: __A : Tuple = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __A : List[str] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase="This is a photo of {}." ): '''simple docstring''' __A : Optional[int] = load_image(__lowerCamelCase ) __A : Optional[int] = self.image_processor(images=[image] , return_tensors=self.framework ) __A : int = candidate_labels __A : int = [hypothesis_template.format(__lowerCamelCase ) for x in candidate_labels] __A : Dict = self.tokenizer(__lowerCamelCase , return_tensors=self.framework , padding=__lowerCamelCase ) __A : int = [text_inputs] return inputs def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : Optional[int] = model_inputs.pop('''candidate_labels''' ) __A : str = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __lowerCamelCase ): __A : Union[str, Any] = text_inputs[0] else: # Batching case. __A : str = text_inputs[0][0] __A : List[str] = self.model(**__lowerCamelCase , **__lowerCamelCase ) __A : Dict = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : Optional[int] = model_outputs.pop('''candidate_labels''' ) __A : int = model_outputs['''logits'''][0] if self.framework == "pt": __A : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 ) __A : Dict = probs.tolist() if not isinstance(__lowerCamelCase , __lowerCamelCase ): __A : List[Any] = [scores] elif self.framework == "tf": __A : List[Any] = stable_softmax(__lowerCamelCase , axis=-1 ) __A : str = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) __A : str = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__lowerCamelCase , __lowerCamelCase ) , key=lambda __lowerCamelCase : -x[0] ) ] return result
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    Adapter that makes `logging` aware of distributed state: by default a
    record is emitted only on the main process.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # Emit from every process, one rank at a time.
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter` wrapping `logging.getLogger(name)`."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
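# Hedged usage sketch for the multi-process logger above, via the public
# `accelerate` API this module ships in; `main_process_only` and `in_order`
# are the keyword arguments consumed by MultiProcessAdapter.log.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState, required before logging
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed by every process, rank by rank", main_process_only=False, in_order=True)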
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : List[Any] = IFInpaintingSuperResolutionPipeline lowerCamelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} ) lowerCamelCase__ : str = PipelineTesterMixin.required_optional_params - {'latents'} def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self, lowercase_, lowercase_=0 ) -> Tuple: """simple docstring""" if str(lowercase_ ).startswith('''mps''' ): a__ =torch.manual_seed(lowercase_ ) else: a__ =torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) a__ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowercase_ ) ).to(lowercase_ ) a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ ) a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ ) a__ ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def _UpperCAmelCase ( self ) -> List[str]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' ) def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self._test_save_load_local() def _UpperCAmelCase ( self ) -> str: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2, )
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCAmelCase : List[str] = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __lowerCamelCase ( lowerCamelCase__ : int ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): '''simple docstring''' if args.student_type == "roberta": lowerCamelCase = False elif args.student_type == "gpt2": lowerCamelCase = False def __lowerCamelCase ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ): '''simple docstring''' if args.student_type == "roberta": lowerCamelCase = False def __lowerCamelCase ( ): '''simple docstring''' lowerCamelCase = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=lowerCamelCase__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=lowerCamelCase__ , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=lowerCamelCase__ , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=lowerCamelCase__ , 
required=lowerCamelCase__ , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=lowerCamelCase__ , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=lowerCamelCase__ , help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=lowerCamelCase__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=lowerCamelCase__ , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=lowerCamelCase__ , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=lowerCamelCase__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.1_5 , type=lowerCamelCase__ , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=lowerCamelCase__ , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=lowerCamelCase__ , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=lowerCamelCase__ , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=lowerCamelCase__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=lowerCamelCase__ , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=lowerCamelCase__ , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=lowerCamelCase__ , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=lowerCamelCase__ , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.0_5 , type=lowerCamelCase__ , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCamelCase__ , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=lowerCamelCase__ , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=lowerCamelCase__ , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=lowerCamelCase__ , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.0_2 , type=lowerCamelCase__ , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=lowerCamelCase__ , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=lowerCamelCase__ , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=lowerCamelCase__ , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=lowerCamelCase__ , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=lowerCamelCase__ , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=lowerCamelCase__ , default=4000 , help="""Checkpoint interval.""" ) lowerCamelCase = parser.parse_args() sanity_checks(lowerCamelCase__ ) # ARGS # init_gpu_params(lowerCamelCase__ ) set_seed(lowerCamelCase__ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite' """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'Experiment will be dumped and logged in {args.dump_path}' ) # SAVE PARAMS # logger.info(f'Param: {args}' ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(lowerCamelCase__ ) , lowerCamelCase__ , indent=4 ) git_log(args.dump_path ) lowerCamelCase , lowerCamelCase , lowerCamelCase = MODEL_CLASSES[args.student_type] lowerCamelCase , lowerCamelCase , lowerCamelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowerCamelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowerCamelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowerCamelCase = tokenizer.all_special_tokens.index(lowerCamelCase__ ) lowerCamelCase = tokenizer.all_special_ids[idx] logger.info(f'Special tokens {special_tok_ids}' ) lowerCamelCase = special_tok_ids lowerCamelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'Loading data from {args.data_file}' ) with open(args.data_file , """rb""" ) as fp: lowerCamelCase = pickle.load(lowerCamelCase__ ) if args.mlm: logger.info(f'Loading token counts from {args.token_counts} 
(already pre-computed)' ) with open(args.token_counts , """rb""" ) as fp: lowerCamelCase = pickle.load(lowerCamelCase__ ) lowerCamelCase = np.maximum(lowerCamelCase__ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowerCamelCase = 0.0 # do not predict special tokens lowerCamelCase = torch.from_numpy(lowerCamelCase__ ) else: lowerCamelCase = None lowerCamelCase = LmSeqsDataset(params=lowerCamelCase__ , data=lowerCamelCase__ ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f'Loading student config from {args.student_config}' ) lowerCamelCase = student_config_class.from_pretrained(args.student_config ) lowerCamelCase = True if args.student_pretrained_weights is not None: logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' ) lowerCamelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCamelCase__ ) else: lowerCamelCase = student_model_class(lowerCamelCase__ ) if args.n_gpu > 0: student.to(f'cuda:{args.local_rank}' ) logger.info("""Student loaded.""" ) # TEACHER # lowerCamelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCamelCase__ ) if args.n_gpu > 0: teacher.to(f'cuda:{args.local_rank}' ) logger.info(f'Teacher loaded from {args.teacher_name}.' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(lowerCamelCase__ , lowerCamelCase__ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(lowerCamelCase__ , lowerCamelCase__ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowerCamelCase = Distiller( params=lowerCamelCase__ , dataset=lowerCamelCase__ , token_probs=lowerCamelCase__ , student=lowerCamelCase__ , teacher=lowerCamelCase__ ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) UpperCAmelCase : Optional[int] = logging.getLogger(__name__) UpperCAmelCase : Dict = tf.data.AUTOTUNE def __lowerCamelCase ( ): '''simple docstring''' lowerCamelCase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=lowerCamelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=lowerCamelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=lowerCamelCase__ , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=lowerCamelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=lowerCamelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=lowerCamelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=lowerCamelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=lowerCamelCase__ , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=lowerCamelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase__ , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=lowerCamelCase__ , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=lowerCamelCase__ , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=lowerCamelCase__ , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=lowerCamelCase__ , default=0.1_5 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=lowerCamelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" ) lowerCamelCase = parser.parse_args() return args def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' try: if args.tpu_name: lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(lowerCamelCase__ ) tf.tpu.experimental.initialize_tpu_system(lowerCamelCase__ ) return tpu def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' lowerCamelCase = 0 for file in file_list: lowerCamelCase = file.split("""/""" )[-1] lowerCamelCase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCamelCase__ ).group(1 ) lowerCamelCase = int(lowerCamelCase__ ) num_samples += sample_count return num_samples def __lowerCamelCase ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=None ): '''simple docstring''' lowerCamelCase = count_samples(lowerCamelCase__ ) lowerCamelCase = tf.data.Dataset.from_tensor_slices(lowerCamelCase__ ) if shuffle: lowerCamelCase = dataset.shuffle(len(lowerCamelCase__ ) ) lowerCamelCase = tf.data.TFRecordDataset(lowerCamelCase__ , num_parallel_reads=lowerCamelCase__ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here lowerCamelCase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCamelCase__ ) ) lowerCamelCase = dataset.map(lowerCamelCase__ , num_parallel_calls=lowerCamelCase__ ) if shuffle: assert shuffle_buffer_size is not None lowerCamelCase = dataset.shuffle(args.shuffle_buffer_size ) lowerCamelCase = dataset.batch(lowerCamelCase__ , drop_remainder=lowerCamelCase__ ) lowerCamelCase = dataset.map(lowerCamelCase__ , num_parallel_calls=lowerCamelCase__ ) lowerCamelCase = dataset.prefetch(lowerCamelCase__ ) return dataset def __lowerCamelCase ( lowerCamelCase__ : Any ): '''simple docstring''' if not args.no_tpu: lowerCamelCase = initialize_tpu(lowerCamelCase__ ) lowerCamelCase = tf.distribute.TPUStrategy(lowerCamelCase__ ) else: lowerCamelCase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer ) lowerCamelCase = AutoConfig.from_pretrained(args.pretrained_model_config ) lowerCamelCase = tokenizer.vocab_size lowerCamelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' 
) lowerCamelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' ) lowerCamelCase = count_samples(lowerCamelCase__ ) lowerCamelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) lowerCamelCase = steps_per_epoch * args.num_epochs with strategy.scope(): lowerCamelCase = TFAutoModelForMaskedLM.from_config(lowerCamelCase__ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built lowerCamelCase , lowerCamelCase = create_optimizer( num_train_steps=lowerCamelCase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=lowerCamelCase__ , metrics=["""accuracy"""] ) def decode_fn(lowerCamelCase__ : Optional[Any] ): lowerCamelCase = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(lowerCamelCase__ , lowerCamelCase__ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. lowerCamelCase = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCamelCase__ , return_tensors="""tf""" ) def mask_with_collator(lowerCamelCase__ : List[Any] ): # TF really needs an isin() function lowerCamelCase = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) lowerCamelCase , lowerCamelCase = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(lowerCamelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCamelCase__ , ) return batch lowerCamelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync lowerCamelCase = prepare_dataset( lowerCamelCase__ , decode_fn=lowerCamelCase__ , mask_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , ) lowerCamelCase = prepare_dataset( lowerCamelCase__ , decode_fn=lowerCamelCase__ , mask_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , ) lowerCamelCase = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCamelCase__ ) ) model.fit( lowerCamelCase__ , validation_data=lowerCamelCase__ , epochs=args.num_epochs , callbacks=lowerCamelCase__ , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": UpperCAmelCase : Tuple = parse_args() main(args)
66
1
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """
    Miller-Rabin style probabilistic primality check, useful for big numbers.
    If n is prime it returns True; if n is composite, the chance of it
    returning True is at most 1 / 4**prec.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
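# The sibling module .binary_exp_mod is not included in this sample; a minimal
# square-and-multiply implementation with the same (base, exponent, modulus)
# call signature would look like this sketch:
def bin_exp_mod(a: int, n: int, b: int) -> int:
    """Compute (a ** n) % b by repeated squaring."""
    res = 1
    a %= b
    while n > 0:
        if n & 1:
            res = (res * a) % b
        a = (a * a) % b
        n >>= 1
    return res


assert bin_exp_mod(7, 123, 1000) == pow(7, 123, 1000)  # sanity check against built-in pow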
48
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length (Project Euler 145),
    filling `digits` from both ends and tracking the carry in `remainder`.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
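# Quick sanity check, assuming the functions above are in scope: the Project
# Euler 145 problem statement says there are exactly 120 reversible numbers below 10**3.
assert solution(3) == 120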
122
0
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping intermediate sums reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
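# Spot-checks for the two helpers above against Python's built-in operators,
# assuming the names given to them in this sample:
assert binary_multiply(37, 41) == 37 * 41
assert binary_mod_multiply(37, 41, 7) == (37 * 41) % 7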
96
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
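# A toy re-implementation (not CTRLTokenizer's actual code) of the greedy BPE
# merge loop the fixture above exercises; merges apply lowest rank first, and
# "</w>" marks the word-final symbol as in the merges file. The real tokenizer
# then renders word-internal pieces with the "@@" continuation marker instead.
def toy_bpe(word, ranked_merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(ranked_merges)}
    while len(symbols) > 1:
        pairs = [(ranks.get(pair, float("inf")), i) for i, pair in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols


print(toy_bpe("adapt", ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]))  # ['adapt</w>']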
96
1
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''''' _lowerCamelCase: int = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self : List[str] ,A_ : Optional[DatasetInfo] = None ,A_ : Optional[str] = None ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(self ,**A_ ) A = repo_info A = token A = None def _SCREAMING_SNAKE_CASE ( self : int ) -> str: if self.dir_cache is None: A = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes A = { 'name': hf_file.rfilename, 'size': None, 'type': 'file', } self.dir_cache.update( { str(A_ ): {'name': str(A_ ), 'size': None, 'type': 'directory'} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : str = "rb" ,**A_ : Dict ,) -> Union[str, Any]: if not isinstance(self.repo_info ,A_ ): raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' ) A = hf_hub_url(self.repo_info.id ,A_ ,revision=self.repo_info.sha ) return fsspec.open( A_ ,mode=A_ ,headers=get_authentication_headers_for_url(A_ ,use_auth_token=self.token ) ,client_kwargs={'trust_env': True} ,).open() def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ,**A_ : int ) -> Tuple: self._get_dirs() A = self._strip_protocol(A_ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : str=False ,**A_ : Tuple ) -> str: self._get_dirs() A = PurePosixPath(path.strip('/' ) ) A = {} for p, f in self.dir_cache.items(): A = PurePosixPath(p.strip('/' ) ) A = p.parent if root == path: A = f A = list(paths.values() ) if detail: return out else: return sorted(f['name'] for f in out )
74
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" snake_case = "pix2struct_text_model" snake_case = ["past_key_values"] snake_case = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _SCREAMING_SNAKE_CASE=5_0244 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , )->int: '''simple docstring''' A_ : Optional[int] = vocab_size A_ : Any = hidden_size A_ : Optional[Any] = d_kv A_ : int = d_ff A_ : int = num_layers A_ : Dict = num_heads A_ : Any = relative_attention_num_buckets A_ : int = relative_attention_max_distance A_ : Optional[Any] = dropout_rate A_ : Optional[Any] = layer_norm_epsilon A_ : List[Any] = initializer_factor A_ : Optional[int] = use_cache A_ : Optional[Any] = eos_token_id A_ : List[Any] = decoder_start_token_id # for backwards compatibility A_ : int = dense_act_fn super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , tie_word_embeddings=_SCREAMING_SNAKE_CASE , is_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE ) A_ , A_ : Union[str, Any] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A_ : Any = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" snake_case = "pix2struct_vision_model" def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1e-10 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=128 , **_SCREAMING_SNAKE_CASE , )->Optional[int]: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) A_ : Dict = hidden_size A_ : Union[str, Any] = patch_embed_hidden_size A_ : Optional[Any] = d_ff A_ : Optional[Any] = dropout_rate A_ : int = num_hidden_layers A_ : Tuple = num_attention_heads A_ : List[str] = initializer_range A_ : List[str] = initializer_factor A_ : Union[str, Any] = attention_dropout A_ : Union[str, Any] = layer_norm_eps A_ : Dict = dense_act_fn A_ : Union[str, Any] = seq_len A_ : Optional[Any] = relative_attention_num_buckets A_ : Tuple = relative_attention_max_distance A_ : List[Any] = d_kv @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE ) A_ , A_ : str = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A_ : Any = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" snake_case = "pix2struct" snake_case = True def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , )->Any: '''simple docstring''' super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text_config is None: A_ : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A_ : List[Any] = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A_ : int = PixaStructTextConfig(**_SCREAMING_SNAKE_CASE ) A_ : Dict = PixaStructVisionConfig(**_SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = self.text_config.decoder_start_token_id A_ : Tuple = self.text_config.pad_token_id A_ : Union[str, Any] = self.text_config.eos_token_id A_ : str = initializer_factor A_ : Tuple = initializer_range A_ : List[str] = self.initializer_range A_ : int = self.initializer_range A_ : List[Any] = is_vqa @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Dict: '''simple docstring''' A_ : List[Any] = copy.deepcopy(self.__dict__ ) A_ : Dict = self.text_config.to_dict() A_ : int = self.vision_config.to_dict() A_ : List[str] = self.__class__.model_type return output
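# Hedged usage sketch for the three config classes above, assuming the upstream
# transformers names (Pix2StructTextConfig / Pix2StructVisionConfig /
# Pix2StructConfig) and the from_text_vision_configs classmethod defined above:
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_kv=16, d_ff=128)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64, d_ff=128)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers, config.vision_config.num_hidden_layers)  # 2 2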
186
0
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
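# The hash above is computed over comment-stripped source lines, so editing a
# comment keeps the cache key stable while editing code invalidates it:
assert _hash_python_lines(["x = 1  # old comment"]) == _hash_python_lines(["x = 1  # new comment"])
assert _hash_python_lines(["x = 1"]) != _hash_python_lines(["x = 2"])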
7
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
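# A stripped-down sketch of the lazy-import behaviour that _LazyModule provides,
# written with module-level __getattr__ (PEP 562); imagine this living in a
# hypothetical package __init__.py:
import importlib

_lazy_structure = {"configuration_deit": ["DeiTConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")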
7
1
'''simple docstring''' import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available __lowerCAmelCase = logging.getLogger(__name__) @dataclass class __magic_name__ : lowerCAmelCase : str lowerCAmelCase : List[str] lowerCAmelCase : Optional[List[str]] @dataclass class __magic_name__ : lowerCAmelCase : List[int] lowerCAmelCase : List[int] lowerCAmelCase : Optional[List[int]] = None lowerCAmelCase : Optional[List[int]] = None class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[Any] = 'train' lowerCAmelCase : List[str] = 'dev' lowerCAmelCase : Optional[int] = 'test' class __magic_name__ : @staticmethod def __lowercase ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[Split, str] ): raise NotImplementedError @staticmethod def __lowercase ( _UpperCAmelCase : str ): raise NotImplementedError @staticmethod def __lowercase ( _UpperCAmelCase : List[InputExample] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : Dict=False ,_UpperCAmelCase : int="[CLS]" ,_UpperCAmelCase : List[Any]=1 ,_UpperCAmelCase : Optional[int]="[SEP]" ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int=False ,_UpperCAmelCase : Tuple=0 ,_UpperCAmelCase : Any=0 ,_UpperCAmelCase : int=-100 ,_UpperCAmelCase : List[Any]=0 ,_UpperCAmelCase : str=True ,): _a : str = {label: i for i, label in enumerate(_UpperCAmelCase )} _a : str = [] for ex_index, example in enumerate(_UpperCAmelCase ): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' ,_UpperCAmelCase ,len(_UpperCAmelCase ) ) _a : Optional[int] = [] _a : Union[str, Any] = [] for word, label in zip(example.words ,example.labels ): _a : str = tokenizer.tokenize(_UpperCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_UpperCAmelCase ) > 0: tokens.extend(_UpperCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_UpperCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. _a : List[Any] = tokenizer.num_special_tokens_to_add() if len(_UpperCAmelCase ) > max_seq_length - special_tokens_count: _a : Optional[Any] = tokens[: (max_seq_length - special_tokens_count)] _a : Optional[int] = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] _a : str = [sequence_a_segment_id] * len(_UpperCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: _a : List[Any] = [cls_token] + tokens _a : Any = [pad_token_label_id] + label_ids _a : List[str] = [cls_token_segment_id] + segment_ids _a : Union[str, Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. _a : Dict = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase ) # Zero-pad up to the sequence length. _a : int = max_seq_length - len(_UpperCAmelCase ) if pad_on_left: _a : Optional[Any] = ([pad_token] * padding_length) + input_ids _a : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask _a : str = ([pad_token_segment_id] * padding_length) + segment_ids _a : Optional[int] = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length if ex_index < 5: logger.info('*** Example ***' ) logger.info('guid: %s' ,example.guid ) logger.info('tokens: %s' ,' '.join([str(_UpperCAmelCase ) for x in tokens] ) ) logger.info('input_ids: %s' ,' '.join([str(_UpperCAmelCase ) for x in input_ids] ) ) logger.info('input_mask: %s' ,' '.join([str(_UpperCAmelCase ) for x in input_mask] ) ) logger.info('segment_ids: %s' ,' '.join([str(_UpperCAmelCase ) for x in segment_ids] ) ) logger.info('label_ids: %s' ,' '.join([str(_UpperCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: _a : Union[str, Any] = None features.append( InputFeatures( input_ids=_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,token_type_ids=_UpperCAmelCase ,label_ids=_UpperCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[InputFeatures] lowerCAmelCase : int = nn.CrossEntropyLoss().ignore_index def __init__( self : int ,_UpperCAmelCase : TokenClassificationTask ,_UpperCAmelCase : str ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : List[Any]=False ,_UpperCAmelCase : Split = Split.train ,): # Load data features from cache or dataset file _a : str = os.path.join( _UpperCAmelCase ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(_UpperCAmelCase ) ) ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_a : Tuple = cached_features_file + '.lock' with FileLock(_UpperCAmelCase ): if os.path.exists(_UpperCAmelCase ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) _a : int = torch.load(_UpperCAmelCase ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) _a : List[str] = token_classification_task.read_examples_from_file(_UpperCAmelCase ,_UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers _a : List[str] = token_classification_task.convert_examples_to_features( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_UpperCAmelCase ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) logger.info(F"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features ,_UpperCAmelCase ) def __len__( self : Any ): return len(self.features ) def __getitem__( self : Tuple ,_UpperCAmelCase : Dict ): return self.features[i] if is_tf_available(): import tensorflow as tf class __magic_name__ : lowerCAmelCase : List[InputFeatures] lowerCAmelCase : int = -1_0_0 def __init__( self : str ,_UpperCAmelCase : TokenClassificationTask ,_UpperCAmelCase : str ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : int=False ,_UpperCAmelCase : Split = Split.train ,): _a : List[Any] = token_classification_task.read_examples_from_file(_UpperCAmelCase ,_UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers _a : Dict = token_classification_task.convert_examples_to_features( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_UpperCAmelCase ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: _a : Optional[Any] = tf.data.Dataset.from_generator( _UpperCAmelCase ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) ,( {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) ,) else: _a : str = tf.data.Dataset.from_generator( _UpperCAmelCase ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) ,( { 'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] ), 'token_type_ids': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) ,) def __lowercase ( self : str ): _a : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Optional[Any] ): return len(self.features ) def 
__getitem__( self, i ): return self.features[i]
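# A compact sketch of the label-alignment rule used by convert_examples_to_features
# above: the first sub-token of each word keeps the real label id, continuation
# sub-tokens get pad_token_label_id (-100, which CrossEntropyLoss ignores).
def align_labels(words, labels, tokenize, label_map, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        word_tokens = tokenize(word)
        if word_tokens:  # some tokenizers return [] for a bare space
            tokens.extend(word_tokens)
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids


toy_tokenize = lambda w: [w] if len(w) < 6 else [w[:3], "##" + w[3:]]  # stand-in for a WordPiece tokenizer
print(align_labels(["John", "Johanson"], ["B-PER", "I-PER"], toy_tokenize, {"B-PER": 1, "I-PER": 2}))
# (['John', 'Joh', '##anson'], [1, 2, -100])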
89
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger(__name__) def a__ ( snake_case__ ) -> Dict: lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in sd.keys(): lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # pop unnecessary weights lowerCamelCase = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(snake_case__ ) lowerCamelCase = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCamelCase = sd.pop(snake_case__ ) lowerCamelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCamelCase = sd[key] # We split QKV in separate Q,K,V lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" ) lowerCamelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 ) lowerCamelCase = q lowerCamelCase = k lowerCamelCase = v del sd[key] return sd @torch.no_grad() def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple: lowerCamelCase = load_checkpoint(snake_case__ ) if config is not None: lowerCamelCase = OPTConfig.from_pretrained(snake_case__ ) else: lowerCamelCase = OPTConfig() lowerCamelCase = OPTModel(snake_case__ ).half().eval() model.load_state_dict(snake_case__ ) # Check results Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowerCAmelCase : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
291
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a__ : int = { '''configuration_pix2struct''': [ '''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Pix2StructConfig''', '''Pix2StructTextConfig''', '''Pix2StructVisionConfig''', ], '''processing_pix2struct''': ['''Pix2StructProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = ['''Pix2StructImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = [ '''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Pix2StructPreTrainedModel''', '''Pix2StructForConditionalGeneration''', '''Pix2StructVisionModel''', '''Pix2StructTextModel''', ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
362
"""simple docstring""" a__ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = input("Enter message: " ) __SCREAMING_SNAKE_CASE = input("Enter key [alphanumeric]: " ) __SCREAMING_SNAKE_CASE = input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): __SCREAMING_SNAKE_CASE = "encrypt" __SCREAMING_SNAKE_CASE = encrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) elif mode.lower().startswith("d" ): __SCREAMING_SNAKE_CASE = "decrypt" __SCREAMING_SNAKE_CASE = decrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) print(f"""\n{mode.title()}ed message:""" ) print(lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "encrypt" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "decrypt" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = key.upper() for symbol in message: __SCREAMING_SNAKE_CASE = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(lowerCAmelCase_ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = 0 else: translated.append(lowerCAmelCase_ ) return "".join(lowerCAmelCase_ ) if __name__ == "__main__": main()
195
0
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __a = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __a = 25_00_04 __a = 25_00_20 @require_sentencepiece @require_tokenizers class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[str] = MBartTokenizer _A : str = MBartTokenizerFast _A : Union[str, Any] = True _A : List[Any] = True def lowerCAmelCase_ ( self: Optional[Any] ) -> str: super().setUp() # We have a SentencePiece fixture for testing snake_case_ :Tuple = MBartTokenizer(snake_case , keep_accents=snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: snake_case_ :str = MBartTokenizer(snake_case , keep_accents=snake_case ) snake_case_ :int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) snake_case_ :Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( snake_case , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) snake_case_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual( snake_case , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) snake_case_ :Dict = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCAmelCase_ ( self: Optional[int] ) -> int: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return snake_case_ :List[str] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) snake_case_ :Optional[Any] = self.tokenizer_class.from_pretrained(snake_case , **snake_case ) snake_case_ :Dict = tempfile.mkdtemp() snake_case_ :str = tokenizer_r.save_pretrained(snake_case ) snake_case_ :int = tokenizer_p.save_pretrained(snake_case ) # Checks it save with the 
same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) snake_case_ :int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(snake_case , snake_case ) # Checks everything loads correctly in the same way snake_case_ :Dict = tokenizer_r.from_pretrained(snake_case ) snake_case_ :Optional[int] = tokenizer_p.from_pretrained(snake_case ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case , snake_case ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case ) # Save tokenizer rust, legacy_format=True snake_case_ :str = tempfile.mkdtemp() snake_case_ :Union[str, Any] = tokenizer_r.save_pretrained(snake_case , legacy_format=snake_case ) snake_case_ :Any = tokenizer_p.save_pretrained(snake_case ) # Checks it save with the same files self.assertSequenceEqual(snake_case , snake_case ) # Checks everything loads correctly in the same way snake_case_ :Union[str, Any] = tokenizer_r.from_pretrained(snake_case ) snake_case_ :Dict = tokenizer_p.from_pretrained(snake_case ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case , snake_case ) ) shutil.rmtree(snake_case ) # Save tokenizer rust, legacy_format=False snake_case_ :Tuple = tempfile.mkdtemp() snake_case_ :Tuple = tokenizer_r.save_pretrained(snake_case , legacy_format=snake_case ) snake_case_ :int = tokenizer_p.save_pretrained(snake_case ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ :Tuple = tokenizer_r.from_pretrained(snake_case ) snake_case_ :List[Any] = tokenizer_p.from_pretrained(snake_case ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case , snake_case ) ) shutil.rmtree(snake_case ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = """facebook/mbart-large-en-ro""" _A : str = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] _A : List[str] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] _A : Tuple = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def lowerCAmelCase_ ( cls: Optional[int] ) -> str: snake_case_ :MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) snake_case_ :List[Any] = 1 return cls def lowerCAmelCase_ ( self: Optional[int] ) -> 
Optional[Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 ) def lowerCAmelCase_ ( self: Dict ) -> Dict: snake_case_ :List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]: self.assertIn(snake_case , self.tokenizer.all_special_ids ) snake_case_ :int = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2] snake_case_ :Any = self.tokenizer.decode(snake_case , skip_special_tokens=snake_case ) snake_case_ :Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case ) self.assertEqual(snake_case , snake_case ) self.assertNotIn(self.tokenizer.eos_token , snake_case ) def lowerCAmelCase_ ( self: Dict ) -> str: snake_case_ :Optional[Any] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , snake_case ) snake_case_ :Optional[int] = 10 snake_case_ :List[str] = self.tokenizer(snake_case , max_length=snake_case , truncation=snake_case ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , snake_case ) self.assertEqual(len(snake_case ) , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_026, 250_001] ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: snake_case_ :List[Any] = tempfile.mkdtemp() snake_case_ :int = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case ) snake_case_ :Dict = MBartTokenizer.from_pretrained(snake_case ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case ) @require_torch def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case , return_tensors="""pt""" ) snake_case_ :str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowerCAmelCase_ ( self: List[Any] ) -> Any: snake_case_ :Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case , truncation=snake_case , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) snake_case_ :int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) snake_case_ :List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowerCAmelCase_ ( self: Dict ) -> List[str]: snake_case_ :Dict = self.tokenizer(self.src_text , padding=snake_case , truncation=snake_case , max_length=3 , return_tensors="""pt""" ) snake_case_ :Dict = self.tokenizer( 
text_target=self.tgt_text , padding=snake_case , truncation=snake_case , max_length=10 , return_tensors="""pt""" ) snake_case_ :Any = targets["""input_ids"""] snake_case_ :str = shift_tokens_right(snake_case , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCAmelCase_ ( self: Dict ) -> Tuple: snake_case_ :Optional[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(snake_case ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3_034, 2, 250_004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250_001, } , )
66
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Any ) -> str: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]: # configuration for running training on smdistributed Model Parallel snake_case_ :Tuple = { """enabled""": True, """processes_per_host""": 8, } snake_case_ :List[Any] = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , ) def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]: TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]: # create estimator snake_case_ :List[Any] = self.create_estimator(snake_case ) # run training estimator.fit() # result dataframe snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ :int = ( 
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
66
1
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dictionary of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
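# The same key/value pairing pattern exercised offline against inline HTML, so
# no network request is needed (the figures are dummy placeholders, not data):
from bs4 import BeautifulSoup

html = (
    '<h1>Coronavirus Cases:</h1><div class="maincounter-number">1,234,567</div>'
    '<h1>Deaths:</h1><div class="maincounter-number">89,012</div>'
)
soup = BeautifulSoup(html, "html.parser")
keys = soup.findAll("h1")
values = soup.findAll("div", {"class": "maincounter-number"})
print({k.text.strip(): v.text.strip() for k, v in zip(keys, values)})
# {'Coronavirus Cases:': '1,234,567', 'Deaths:': '89,012'}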
355
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
0
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int lowercase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ = None def _snake_case ( lowercase__ , lowercase__ , ): import pyspark def generate_fn(): _lowerCamelCase : Tuple = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: _lowerCamelCase : Optional[Any] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) _lowerCamelCase : int = partition_df.collect() _lowerCamelCase : List[Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self , lowercase , lowercase=None , ): _lowerCamelCase : Any = df _lowerCamelCase : List[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowerCamelCase : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def A_ ( self , lowercase ): _lowerCamelCase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase ) return SparkExamplesIterable(self.df , partition_order=lowercase ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : str = self.split_shard_indices_by_worker(lowercase , lowercase ) return SparkExamplesIterable(self.df , partition_order=lowercase ) @property def A_ ( self ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): '''simple docstring''' lowerCamelCase__ = SparkConfig def __init__( self , lowercase , lowercase = None , lowercase = None , **lowercase , ): import pyspark _lowerCamelCase : Dict = pyspark.sql.SparkSession.builder.getOrCreate() _lowerCamelCase : List[Any] = df _lowerCamelCase : List[Any] = working_dir super().__init__( cache_dir=lowercase , config_name=str(self.df.semanticHash() ) , **lowercase , ) def A_ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(lowercase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase ) _lowerCamelCase : List[str] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowercase , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _lowerCamelCase : int = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def A_ ( self ): return datasets.DatasetInfo(features=self.config.features ) def A_ ( self , lowercase ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def A_ ( self , lowercase ): import pyspark def get_arrow_batch_size(lowercase ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) _lowerCamelCase : int = self.df.count() _lowerCamelCase : Dict = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowerCamelCase : Optional[int] = ( self.df.limit(lowercase ) .repartition(1 ) .mapInArrow(lowercase , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowerCamelCase : Tuple = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowerCamelCase : Dict = min(lowercase , int(approx_total_size / max_shard_size ) ) _lowerCamelCase : List[str] = self.df.repartition(lowercase ) def A_ ( self , lowercase , lowercase , lowercase , ): import pyspark _lowerCamelCase : List[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter _lowerCamelCase : List[Any] = os.path.join(self._working_dir , os.path.basename(lowercase ) ) if self._working_dir else fpath _lowerCamelCase : Union[str, Any] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowerCamelCase : Optional[int] = self.config.features _lowerCamelCase : Union[str, Any] = self._writer_batch_size _lowerCamelCase : int = self._fs.storage_options def write_arrow(lowercase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowerCamelCase : Dict = pyspark.TaskContext().taskAttemptId() _lowerCamelCase : List[Any] = next(lowercase , lowercase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Tuple = writer_class( features=lowercase , path=working_fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , writer_batch_size=lowercase , storage_options=lowercase , embed_local_files=lowercase , ) _lowerCamelCase : int = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowerCamelCase, _lowerCamelCase : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 _lowerCamelCase : int = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , writer_batch_size=lowercase , storage_options=lowercase , embed_local_files=lowercase , ) _lowerCamelCase : Optional[int] = pa.Table.from_batches([batch] ) writer.write_table(lowercase ) if writer._num_bytes > 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase ) ): _lowerCamelCase : Dict = os.path.join(os.path.dirname(lowercase ) , os.path.basename(lowercase ) ) shutil.move(lowercase , lowercase ) _lowerCamelCase : Any = ( self.df.mapInArrow(lowercase , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def A_ ( self , lowercase , lowercase = "arrow" , lowercase = None , lowercase = None , **lowercase , ): self._validate_cache_dir() _lowerCamelCase : Optional[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase ) _lowerCamelCase : Optional[Any] = not is_remote_filesystem(self._fs ) _lowerCamelCase : int = os.path.join if is_local else posixpath.join _lowerCamelCase : Optional[Any] = '-TTTTT-SSSSS-of-NNNNN' _lowerCamelCase : Any = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _lowerCamelCase : Any = path_join(self._output_dir , lowercase ) _lowerCamelCase : Dict = 0 _lowerCamelCase : Dict = 0 _lowerCamelCase : Dict = 0 _lowerCamelCase : int = [] _lowerCamelCase : List[Any] = [] for task_id, content in self._prepare_split_single(lowercase , lowercase , lowercase ): ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : str = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase ) _lowerCamelCase : int = total_num_examples _lowerCamelCase : Optional[int] = total_num_bytes # should rename everything at the end logger.debug(F'''Renaming {total_shards} shards.''' ) if total_shards > 1: _lowerCamelCase : List[Any] = 
all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowerCamelCase : Optional[int] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowercase , lowercase , lowercase , ): rename( lowercase , fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , F'''{global_shard_id:05d}''' ).replace('NNNNN' , F'''{total_shards:05d}''' ) , ) _lowerCamelCase : List[Any] = [] _lowerCamelCase : Union[str, Any] = 0 for i in range(len(lowercase ) ): _lowerCamelCase, _lowerCamelCase : List[str] = task_id_and_num_shards[i] for shard_id in range(lowercase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase , len(lowercase ) ).map(lambda lowercase : _rename_shard(*lowercase ) ).collect() else: # don't use any pattern _lowerCamelCase : Dict = 0 _lowerCamelCase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , fpath.replace(lowercase , '' ) , ) def A_ ( self , lowercase , ): return SparkExamplesIterable(self.df )
96
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""image_processor""", """tokenizer"""] lowerCamelCase__ = """BlipImageProcessor""" lowerCamelCase__ = """AutoTokenizer""" def __init__( self , lowercase , lowercase , lowercase ): super().__init__(lowercase , lowercase ) # add QFormer tokenizer _lowerCamelCase : int = qformer_tokenizer def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): if images is None and text is None: raise ValueError('You have to specify at least images or text.' ) _lowerCamelCase : int = BatchFeature() if text is not None: _lowerCamelCase : List[str] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) encoding.update(lowercase ) _lowerCamelCase : List[str] = self.qformer_tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) _lowerCamelCase : List[Any] = qformer_text_encoding.pop('input_ids' ) _lowerCamelCase : Tuple = qformer_text_encoding.pop('attention_mask' ) if images is not None: _lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase ) encoding.update(lowercase ) return encoding def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names _lowerCamelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A_ ( self , lowercase , **lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : Optional[Any] = os.path.join(lowercase , 'qformer_tokenizer' ) self.qformer_tokenizer.save_pretrained(lowercase ) return super().save_pretrained(lowercase , **lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): _lowerCamelCase : 
Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , subfolder='qformer_tokenizer' ) _lowerCamelCase : Dict = cls._get_arguments_from_pretrained(lowercase , **lowercase ) args.append(lowercase ) return cls(*lowercase )
96
1
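The record above iterates a Spark DataFrame one partition at a time, yielding `(key, dict)` example pairs keyed by partition and row id. A minimal sketch of that pattern, assuming a local pyspark session; the toy DataFrame here is illustrative, not part of the original sample:

from pyspark.sql import SparkSession, functions as F

# Build a small local DataFrame to iterate over.
spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([(i, f"row-{i}") for i in range(10)], "id: long, text: string")

# Tag each row with its partition id, then collect one partition at a time.
df_with_part_id = df.select("*", F.spark_partition_id().alias("part_id"))
for partition_id in range(df.rdd.getNumPartitions()):
    partition_df = df_with_part_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
    for row_id, row in enumerate(partition_df.collect()):
        print(f"{partition_id}_{row_id}", row.asDict())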
"""simple docstring""" from math import factorial def SCREAMING_SNAKE_CASE_ ( snake_case : int = 20 )-> int: _lowerCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... _lowerCamelCase = n // 2 return int(factorial(snake_case ) / (factorial(snake_case ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: A_ : Optional[Any] =int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
363
"""simple docstring""" from collections import defaultdict from math import gcd def SCREAMING_SNAKE_CASE_ ( snake_case : int = 1_500_000 )-> int: _lowerCamelCase = defaultdict(snake_case ) _lowerCamelCase = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , snake_case , 2 ): if gcd(snake_case , snake_case ) > 1: continue _lowerCamelCase = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(snake_case , limit + 1 , snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f'{solution() = }')
80
0
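The lattice-path count above is the central binomial coefficient C(2n, n): choosing which n of the 2n grid steps go right. A quick cross-check against `math.comb`:

from math import comb, factorial

def central_binomial(n: int) -> int:
    # Number of monotone paths through an n x n grid.
    return factorial(2 * n) // (factorial(n) * factorial(n))

# Project Euler 15: a 20 x 20 grid has C(40, 20) routes.
assert central_binomial(20) == comb(40, 20) == 137846528820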
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> str: '''simple docstring''' A__ = [] for line in lines: A__ = re.sub(R'#.*' , '' , SCREAMING_SNAKE_CASE__ ) # remove comments if line: filtered_lines.append(SCREAMING_SNAKE_CASE__ ) A__ = '\n'.join(SCREAMING_SNAKE_CASE__ ) # Make a hash from all this code A__ = full_str.encode('utf-8' ) return shaaaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # get importable module names and hash for caching lowercase_ = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions lowercase_ = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) lowercase_ = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name lowercase_ = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(".zip") _MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
7
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'open-llama' def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple: '''simple docstring''' A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = initializer_range A__ = rms_norm_eps A__ = use_cache A__ = kwargs.pop( 'use_memorry_efficient_attention',lowercase_ ) A__ = hidden_dropout_prob A__ = attention_dropout_prob A__ = use_stable_embedding A__ = shared_input_output_embedding A__ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,) def snake_case__ ( self : str )-> str: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F'got {self.rope_scaling}' ) A__ = self.rope_scaling.get('type',lowercase_ ) A__ = self.rope_scaling.get('factor',lowercase_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
7
1
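The module-loader record hashes each importable module's source so cached datasets invalidate when the loader code changes, after stripping comments and blank lines. A compact restatement, assuming `hashlib.sha256` is the hash behind the obfuscated name:

import re
from hashlib import sha256

def hash_python_lines(lines):
    # Drop comments and empty lines so cosmetic edits don't bust the cache.
    filtered = [re.sub(r"#.*", "", line) for line in lines]
    filtered = [line for line in filtered if line]
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

print(hash_python_lines(["x = 1  # comment", "", "y = 2"]))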
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _UpperCamelCase: Optional[int] = logging.get_logger(__name__) _UpperCamelCase: Union[str, Any] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) _UpperCamelCase: int = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) _UpperCamelCase: int = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) _UpperCamelCase: Any = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) _UpperCamelCase: List[str] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] 
) _UpperCamelCase: Tuple = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) _UpperCamelCase: Dict = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) _UpperCamelCase: Union[str, Any] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) _UpperCamelCase: int = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) _UpperCamelCase: int = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) _UpperCamelCase: Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) _UpperCamelCase: int = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) _UpperCamelCase: Optional[int] = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) _UpperCamelCase: int = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) _UpperCamelCase: int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _UpperCamelCase: 
List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _UpperCamelCase: Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _UpperCamelCase: int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _UpperCamelCase: str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _UpperCamelCase: Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _UpperCamelCase: str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _UpperCamelCase: Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _UpperCamelCase: Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _UpperCamelCase: Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _UpperCamelCase: List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _UpperCamelCase: Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _UpperCamelCase: Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _UpperCamelCase: List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_MAPPING _UpperCamelCase: Dict = auto_class_update(FlaxAutoModel) class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING _UpperCamelCase: int = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _UpperCamelCase: List[str] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING _UpperCamelCase: int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCamelCase: Tuple = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _UpperCamelCase: int = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _UpperCamelCase: List[Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _UpperCamelCase: Optional[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _UpperCamelCase: Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _UpperCamelCase: Any = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class a__ ( _BaseAutoModelClass 
): _lowerCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _UpperCamelCase: Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _UpperCamelCase: List[Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class a__ ( _BaseAutoModelClass ): _lowerCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _UpperCamelCase: Optional[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
53
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers _UpperCamelCase: Any = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
53
1
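The OrderedDict name tables above feed `_LazyAutoMapping`, so concrete Flax classes import only when first requested. From the user side this is just the auto-class API; a one-liner, assuming `transformers` is installed with Flax extras:

from transformers import FlaxAutoModelForMaskedLM

# Resolves the 'roberta' config type to FlaxRobertaForMaskedLM via the mapping above.
model = FlaxAutoModelForMaskedLM.from_pretrained("roberta-base")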
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'], 'convert_funnel_original_tf_checkpoint_to_pytorch': [], 'tokenization_funnel': ['FunnelTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['FunnelTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST', 'FunnelBaseModel', 'FunnelForMaskedLM', 'FunnelForMultipleChoice', 'FunnelForPreTraining', 'FunnelForQuestionAnswering', 'FunnelForSequenceClassification', 'FunnelForTokenClassification', 'FunnelModel', 'FunnelPreTrainedModel', 'load_tf_weights_in_funnel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFFunnelBaseModel', 'TFFunnelForMaskedLM', 'TFFunnelForMultipleChoice', 'TFFunnelForPreTraining', 'TFFunnelForQuestionAnswering', 'TFFunnelForSequenceClassification', 'TFFunnelForTokenClassification', 'TFFunnelModel', 'TFFunnelPreTrainedModel', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A_ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ): return None class A_ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ): return None class A_ ( unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Tuple = [ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case , 'tf' , 12 , **snake_case ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case , 'pt' , 12 , **snake_case ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): from transformers import BertModel lowercase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(snake_case ) ) vocab_file.flush() lowercase = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase = BertModel(BertConfig(vocab_size=len(snake_case ) ) ) model.save_pretrained(snake_case ) self._test_export(snake_case , 'pt' , 12 , snake_case ) @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase = self._test_export(snake_case , 'tf' , 12 , **snake_case ) lowercase = quantize(Path(snake_case ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase = self._test_export(snake_case , 'pt' , 12 , **snake_case ) lowercase = quantize(snake_case ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ): try: # Compute path with TemporaryDirectory() as tempdir: lowercase = Path(snake_case ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case ) return path except Exception as e: self.fail(snake_case ) @require_torch @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self ): from transformers import BertModel lowercase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(snake_case , snake_case , 'pt' ) @require_tf @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self ): from transformers import TFBertModel lowercase 
= TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(snake_case , snake_case , 'tf' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ): lowercase = FeatureExtractionPipeline(snake_case , snake_case ) lowercase = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase , lowercase , lowercase , lowercase = infer_shapes(snake_case , snake_case ) # Assert all variables are present self.assertEqual(len(snake_case ) , len(snake_case ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , snake_case ) self.assertSequenceEqual(variable_names[3:] , snake_case ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] , {0: 'batch'} ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase , lowercase = ensure_valid_input(FuncContiguousArgs() , snake_case , snake_case ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(snake_case ) , 3 ) # Should have exactly the same input names self.assertEqual(set(snake_case ) , set(snake_case ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(snake_case , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase , lowercase = ensure_valid_input(FuncNonContiguousArgs() , snake_case , snake_case ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(snake_case ) , 1 ) self.assertEqual(len(snake_case ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] , 'input_ids' ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
195
0
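The funnel `__init__` record follows transformers' lazy-import convention: declare an import structure, then replace the module object with a `_LazyModule` proxy. A minimal sketch of the pattern; `_LazyModule` is a private transformers helper and the structure shown is a trimmed-down illustration:

import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_funnel": ["FunnelConfig"]}
# Swap this module for a lazy proxy; attributes import on first access.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)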
import re def UpperCAmelCase ( _lowerCamelCase ): if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ): raise ValueError("Invalid Strand" ) return _lowerCamelCase.translate(_lowerCamelCase.maketrans("ATCG" , "TAGC" ) ) if __name__ == "__main__": import doctest doctest.testmod()
256
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def UpperCAmelCase ( _lowerCamelCase ): A : Any = [] for line in lines: A : List[str] = re.sub(R"#.*" , "" , _lowerCamelCase ) # remove comments if line: filtered_lines.append(_lowerCamelCase ) A : str = "\n".join(_lowerCamelCase ) # Make a hash from all this code A : Any = full_str.encode("utf-8" ) return shaaaa(_lowerCamelCase ).hexdigest() # get importable module names and hash for caching __SCREAMING_SNAKE_CASE = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions __SCREAMING_SNAKE_CASE = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) __SCREAMING_SNAKE_CASE = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name __SCREAMING_SNAKE_CASE = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
256
1
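Usage of the strand-complement helper above, renamed here for readability (same validation and translation table):

import re

def dna_complement(strand: str) -> str:
    # Reject anything that is not a pure A/T/C/G strand.
    if len(re.findall("[ATCG]", strand)) != len(strand):
        raise ValueError("Invalid Strand")
    return strand.translate(str.maketrans("ATCG", "TAGC"))

assert dna_complement("GCAT") == "CGTA"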
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _snake_case = get_logger(__name__) _snake_case = Path(__file__).parent / '''model_card_template.md''' _snake_case = uuida().hex _snake_case = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES _snake_case = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES _snake_case = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def lowercase_( SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' lowerCamelCase : Tuple = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"""; torch/{_torch_version}""" if is_flax_available(): ua += f"""; jax/{_jax_version}""" ua += f"""; flax/{_flax_version}""" if is_onnx_available(): ua += f"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): ua += "; " + user_agent return ua def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if token is None: lowerCamelCase : List[str] = HfFolder.get_token() if organization is None: lowerCamelCase : Union[str, Any] = whoami(SCREAMING_SNAKE_CASE_ )["name"] return f"""{username}/{model_id}""" else: return f"""{organization}/{model_id}""" def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(SCREAMING_SNAKE_CASE_ , "local_rank" ) and args.local_rank not in [-1, 0]: return lowerCamelCase : Optional[int] = args.hub_token if hasattr(SCREAMING_SNAKE_CASE_ , "hub_token" ) else None lowerCamelCase : Dict = get_full_repo_name(SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) lowerCamelCase : str = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , repo_name=SCREAMING_SNAKE_CASE_ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE_ , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE_ , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE_ , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE_ , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE_ , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE_ , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE_ , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE_ , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE_ , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) lowerCamelCase : Dict = os.path.join(args.output_dir , "README.md" ) model_card.save(SCREAMING_SNAKE_CASE_ ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash lowerCamelCase : str = str(Path(SCREAMING_SNAKE_CASE_ ).as_posix() ) lowerCamelCase : Optional[int] = re.search(r"snapshots/([^/]+)/" , SCREAMING_SNAKE_CASE_ ) if search is None: return None lowerCamelCase : str = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_snake_case = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) _snake_case = os.path.join(hf_cache_home, '''diffusers''') def lowercase_( SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if new_cache_dir is None: lowerCamelCase : List[Any] = DIFFUSERS_CACHE if old_cache_dir is None: lowerCamelCase : Any = old_diffusers_cache lowerCamelCase : Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).expanduser() lowerCamelCase : str = Path(SCREAMING_SNAKE_CASE_ ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): lowerCamelCase : Dict = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE_ ) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) os.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) try: os.symlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _snake_case = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): _snake_case = 0 else: with open(cache_version_file) as f: try: _snake_case = int(f.read()) except ValueError: _snake_case = 0 if cache_version < 1: _snake_case = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: _snake_case = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' '''the directory exists and can be written to.''' ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if variant is not None: lowerCamelCase : Optional[int] = weights_name.split("." 
) lowerCamelCase : Optional[int] = splits[:-1] + [variant] + splits[-1:] lowerCamelCase : Optional[Any] = ".".join(SCREAMING_SNAKE_CASE_ ) return weights_name def lowercase_( SCREAMING_SNAKE_CASE_ , *, SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , ): '''simple docstring''' lowerCamelCase : str = str(SCREAMING_SNAKE_CASE_ ) if os.path.isfile(SCREAMING_SNAKE_CASE_ ): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE_ ): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): # Load from a PyTorch checkpoint lowerCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): lowerCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return model_file else: raise EnvironmentError( f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse("0.20.0" ) ): try: lowerCamelCase : List[str] = hf_hub_download( SCREAMING_SNAKE_CASE_ , filename=_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , ) warnings.warn( f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , SCREAMING_SNAKE_CASE_ , ) return model_file except: # noqa: E722 warnings.warn( f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}' so that the correct variant file can be added.""" , SCREAMING_SNAKE_CASE_ , ) try: # 2. 
Load model file as usual lowerCamelCase : Any = hf_hub_download( SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """ "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """ "this model name. Check the model page at " f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" ) except EntryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" ) except HTTPError as err: raise EnvironmentError( f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" ) except ValueError: raise EnvironmentError( f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it""" f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a""" f""" directory containing a file named {weights_name} or""" " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """ "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """ f"""containing a file named {weights_name}""" )
283
def cocktail_shaker_sort( unsorted ): '''simple docstring''' for i in range(len(unsorted ) - 1 , 0 , -1 ): swapped = False for j in range(i , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1] swapped = True for j in range(i ): if unsorted[j] > unsorted[j + 1]: unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1] swapped = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() user_input = input('''Enter numbers separated by a comma:\n''').strip() unsorted = [int(item) for item in user_input.split(''',''')] print(f'''{cocktail_shaker_sort(unsorted) = }''')
283
1
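The variant-filename helper in the diffusers record reduces to inserting the variant label before the final extension. A standalone restatement with a sanity check:

from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # 'model.bin' + 'fp16' -> 'model.fp16.bin'
    if variant is not None:
        splits = weights_name.split(".")
        weights_name = ".".join(splits[:-1] + [variant] + splits[-1:])
    return weights_name

assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"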
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger() def __lowercase ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : LevitConfig , lowerCamelCase : Path , lowerCamelCase : bool = True ): print(F"Converting {name}..." ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": UpperCamelCase_ : Optional[Any] = timm.create_model('levit_128s' , pretrained=lowerCamelCase ) else: UpperCamelCase_ : List[str] = timm.create_model('levit_128' , pretrained=lowerCamelCase ) if hidden_sizes == 192: UpperCamelCase_ : Optional[Any] = timm.create_model('levit_192' , pretrained=lowerCamelCase ) if hidden_sizes == 256: UpperCamelCase_ : Optional[Any] = timm.create_model('levit_256' , pretrained=lowerCamelCase ) if hidden_sizes == 384: UpperCamelCase_ : Tuple = timm.create_model('levit_384' , pretrained=lowerCamelCase ) from_model.eval() UpperCamelCase_ : str = LevitForImageClassificationWithTeacher(lowerCamelCase ).eval() UpperCamelCase_ : Tuple = OrderedDict() UpperCamelCase_ : List[Any] = from_model.state_dict() UpperCamelCase_ : Any = list(from_model.state_dict().keys() ) UpperCamelCase_ : int = list(our_model.state_dict().keys() ) print(len(lowerCamelCase ) , len(lowerCamelCase ) ) for i in range(len(lowerCamelCase ) ): UpperCamelCase_ : Tuple = weights[og_keys[i]] our_model.load_state_dict(lowerCamelCase ) UpperCamelCase_ : str = torch.randn((2, 3, 224, 224) ) UpperCamelCase_ : List[Any] = from_model(lowerCamelCase ) UpperCamelCase_ : Tuple = our_model(lowerCamelCase ).logits assert torch.allclose(lowerCamelCase , lowerCamelCase ), "The model logits don't match the original one." 
UpperCamelCase_ : Union[str, Any] = name print(lowerCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) UpperCamelCase_ : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"Pushed {checkpoint_name}" ) def __lowercase ( lowerCamelCase : Path , lowerCamelCase : str = None , lowerCamelCase : bool = True ): UpperCamelCase_ : int = 'imagenet-1k-id2label.json' UpperCamelCase_ : Union[str, Any] = 1000 UpperCamelCase_ : Optional[int] = (1, num_labels) UpperCamelCase_ : List[Any] = 'huggingface/label-files' UpperCamelCase_ : Optional[Any] = num_labels UpperCamelCase_ : Optional[Any] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) ) UpperCamelCase_ : int = {int(lowerCamelCase ): v for k, v in idalabel.items()} UpperCamelCase_ : Tuple = idalabel UpperCamelCase_ : Tuple = {v: k for k, v in idalabel.items()} UpperCamelCase_ : str = partial(lowerCamelCase , num_labels=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase ) UpperCamelCase_ : Optional[int] = { 'levit-128S': 128, 'levit-128': 128, 'levit-192': 192, 'levit-256': 256, 'levit-384': 384, } UpperCamelCase_ : List[Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , lowerCamelCase , names_to_config[model_name] , lowerCamelCase , lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) return config, expected_shape if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) a_ = parser.parse_args() a_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
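A short usage sketch of the modules above, assuming jax and flax are installed. FlaxTimesteps has no parameters, so it can be applied with an empty variable dict; FlaxTimestepEmbedding has Dense parameters and must be initialized first.

import jax
import jax.numpy as jnp

timesteps = jnp.array([0.0, 10.0, 500.0])  # one diffusion timestep per sample

proj = FlaxTimesteps(dim=32)
emb = proj.apply({}, timesteps)  # shape (3, 32); purely functional, no parameters

mlp = FlaxTimestepEmbedding(time_embed_dim=128)
params = mlp.init(jax.random.PRNGKey(0), emb)
temb = mlp.apply(params, emb)  # shape (3, 128)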
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
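A few spot checks for the normalization and scoring helpers above; the strings are illustrative.

assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
assert compute_exact("the cat", "Cat") == 1  # case, punctuation and articles are ignored
print(round(compute_f1("brown fox", "quick brown fox"), 3))  # 0.8: precision 2/3, recall 1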
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
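The point of the _LazyModule indirection is that importing the package stays cheap; the torch-backed modeling code is only imported on first attribute access. A minimal sketch, assuming torch is installed:

import transformers.models.plbart as plbart

# No modeling code has been imported yet; this attribute access triggers the real import.
model_cls = plbart.PLBartForConditionalGeneration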
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _UpperCAmelCase : Union[str, Any] = datasets.logging.get_logger(__name__) _UpperCAmelCase : Tuple = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" _UpperCAmelCase : int = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" _UpperCAmelCase : Union[str, Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="dummy_doc" ): lowercase :str = {doc: key_lines} lowercase :Union[str, Any] = {doc: sys_lines} lowercase :Tuple = {} lowercase :Optional[Any] = 0 lowercase :str = 0 lowercase :Optional[Any] = 0 lowercase :str = 0 lowercase :Union[str, Any] = 0 lowercase :int = 0 lowercase , lowercase :str = reader.get_doc_mentions(lowerCamelCase, key_doc_lines[doc], lowerCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: lowercase :Any = reader.set_annotated_parse_trees(lowerCamelCase, key_doc_lines[doc], lowerCamelCase, lowerCamelCase ) lowercase , lowercase :int = reader.get_doc_mentions(lowerCamelCase, sys_doc_lines[doc], lowerCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: lowercase :str = reader.set_annotated_parse_trees(lowerCamelCase, key_doc_lines[doc], lowerCamelCase, lowerCamelCase ) if remove_nested: lowercase , lowercase :List[str] = reader.remove_nested_coref_mentions(lowerCamelCase, lowerCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowercase , lowercase :Any = reader.remove_nested_coref_mentions(lowerCamelCase, lowerCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowercase :Optional[Any] = reader.get_mention_assignments(lowerCamelCase, lowerCamelCase ) lowercase :str = reader.get_mention_assignments(lowerCamelCase, lowerCamelCase ) lowercase :Optional[int] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" ) logger.info( "Number of resulting singleton clusters in the key " F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" ) if not keep_singletons: logger.info( F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system " "files, respectively" ) return doc_coref_infos def UpperCAmelCase__ ( lowerCamelCase, 
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowercase :Union[str, Any] = get_coref_infos(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ) lowercase :List[str] = {} lowercase :Dict = 0 lowercase :Tuple = 0 for name, metric in metrics: lowercase , lowercase , lowercase :int = evaluator.evaluate_documents(lowerCamelCase, lowerCamelCase, beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} ) logger.info( name.ljust(10 ), F"Recall: {recall * 100:.2f}", F" Precision: {precision * 100:.2f}", F" F1: {fa * 100:.2f}", ) if conll_subparts_num == 3: lowercase :Any = (conll / 3) * 100 logger.info(F"CoNLL score: {conll:.2f}" ) output_scores.update({"conll_score": conll} ) return output_scores def UpperCAmelCase__ ( lowerCamelCase ): lowercase :str = False for line in key_lines: if not line.startswith("#" ): if len(line.split() ) > 6: lowercase :Union[str, Any] = line.split()[5] if not parse_col == "-": lowercase :Optional[int] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __lowerCAmelCase ( datasets.Metric): def SCREAMING_SNAKE_CASE ( self: Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Sequence(datasets.Value("string" ) ), } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[ "https://github.com/ns-moosavi/coval", "https://www.aclweb.org/anthology/P16-1060", "http://www.conll.cemantix.org/2012/data.html", ] , ) def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Dict , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple=True , _lowerCAmelCase: Dict=False , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: Dict=False ): lowercase :Any = [ ("mentions", evaluator.mentions), ("muc", evaluator.muc), ("bcub", evaluator.b_cubed), ("ceafe", evaluator.ceafe), ("lea", evaluator.lea), ] if min_span: lowercase :List[str] = util.check_gold_parse_annotation(_lowerCAmelCase ) if not has_gold_parse: raise NotImplementedError("References should have gold parse annotation to use 'min_span'." ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowercase :List[str] = evaluate( key_lines=_lowerCAmelCase , sys_lines=_lowerCAmelCase , metrics=_lowerCAmelCase , NP_only=_lowerCAmelCase , remove_nested=_lowerCAmelCase , keep_singletons=_lowerCAmelCase , min_span=_lowerCAmelCase , ) return score
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating from this node revisits a node, i.e. the list contains a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
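The visited-list above costs O(n^2) time because of the linear membership test. A constant-memory alternative under the same Node interface is Floyd's tortoise-and-hare, sketched below; the helper name is mine, not part of the original.

def has_loop_floyd(head: Node) -> bool:
    """Two pointers advance at different speeds; they can only meet inside a cycle."""
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:
            return True
    return False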
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow logging if it gets imported

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : Dict = botoa.client('''iam''' ) lowerCAmelCase__ : Optional[Any] = { '''Version''': '''2012-10-17''', '''Statement''': [ {'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) ) lowerCAmelCase__ : Optional[int] = { '''Version''': '''2012-10-17''', '''Statement''': [ { '''Effect''': '''Allow''', '''Action''': [ '''sagemaker:*''', '''ecr:GetDownloadUrlForLayer''', '''ecr:BatchGetImage''', '''ecr:BatchCheckLayerAvailability''', '''ecr:GetAuthorizationToken''', '''cloudwatch:PutMetricData''', '''cloudwatch:GetMetricData''', '''cloudwatch:GetMetricStatistics''', '''cloudwatch:ListMetrics''', '''logs:CreateLogGroup''', '''logs:CreateLogStream''', '''logs:DescribeLogStreams''', '''logs:PutLogEvents''', '''logs:GetLogEvents''', '''s3:CreateBucket''', '''s3:ListBucket''', '''s3:GetBucketLocation''', '''s3:GetObject''', '''s3:PutObject''', ], '''Resource''': '''*''', } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_ , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(A_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'role {role_name} already exists. 
Using existing one' ) def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : Union[str, Any] = botoa.client('''iam''' ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Dict = _ask_options( '''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , A_ , ) lowerCAmelCase__ : int = None if credentials_configuration == 0: lowerCAmelCase__ : Tuple = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' ) lowerCAmelCase__ : Optional[int] = aws_profile else: print( '''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,''' '''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' ) lowerCAmelCase__ : Optional[Any] = _ask_field('''AWS Access Key ID: ''' ) lowerCAmelCase__ : Union[str, Any] = aws_access_key_id lowerCAmelCase__ : Dict = _ask_field('''AWS Secret Access Key: ''' ) lowerCAmelCase__ : str = aws_secret_access_key lowerCAmelCase__ : Optional[Any] = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' ) lowerCAmelCase__ : Optional[int] = aws_region lowerCAmelCase__ : List[str] = _ask_options( '''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , A_ , ) if role_management == 0: lowerCAmelCase__ : Optional[Any] = _ask_field('''Enter your IAM role name: ''' ) else: lowerCAmelCase__ : Optional[int] = '''accelerate_sagemaker_execution_role''' print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' ) _create_iam_role_for_sagemaker(A_ ) lowerCAmelCase__ : Optional[int] = _ask_field( '''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) lowerCAmelCase__ : Any = None if is_custom_docker_image: lowerCAmelCase__ : List[Any] = _ask_field('''Enter your Docker image: ''' , lambda A_ : str(A_ ).lower() ) lowerCAmelCase__ : List[Any] = _ask_field( '''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) lowerCAmelCase__ : Tuple = None if is_sagemaker_inputs_enabled: lowerCAmelCase__ : Dict = _ask_field( '''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda A_ : str(A_ ).lower() , ) lowerCAmelCase__ : List[Any] = _ask_field( '''Do you want to enable SageMaker metrics? 
[yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) lowerCAmelCase__ : Optional[Any] = None if is_sagemaker_metrics_enabled: lowerCAmelCase__ : str = _ask_field( '''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda A_ : str(A_ ).lower() , ) lowerCAmelCase__ : Optional[Any] = _ask_options( '''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , ) lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Dict = _ask_field( '''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) if use_dynamo: lowerCAmelCase__ : Optional[int] = '''dynamo_''' lowerCAmelCase__ : int = _ask_options( '''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) lowerCAmelCase__ : List[Any] = _ask_field( '''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) if use_custom_options: lowerCAmelCase__ : List[Any] = _ask_options( '''Which mode do you want to use?''' , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default='''default''' , ) lowerCAmelCase__ : Optional[Any] = _ask_field( '''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) lowerCAmelCase__ : str = _ask_field( '''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , ) lowerCAmelCase__ : Optional[Any] = '''Which EC2 instance type you want to use for your training?''' if distributed_type != SageMakerDistributedType.NO: lowerCAmelCase__ : Optional[Any] = _ask_options( A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" lowerCAmelCase__ : Union[str, Any] = _ask_field(A_ , lambda A_ : str(A_ ).lower() , default='''ml.p3.2xlarge''' ) lowerCAmelCase__ : Union[str, Any] = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): lowerCAmelCase__ : Any = _ask_field( '''How many machines do you want use? [1]: ''' , A_ , default=1 , ) lowerCAmelCase__ : str = _ask_options( '''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( '''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' ) return SageMakerConfig( image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __UpperCamelCase : Any = NewType('''DataClass''', Any) __UpperCamelCase : List[str] = NewType('''DataClassType''', Any) def __SCREAMING_SNAKE_CASE ( A_ ): if isinstance(A_ , A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = {str(A_ ): choice for choice in choices} return lambda A_ : str_to_choice.get(A_ , A_ ) def __SCREAMING_SNAKE_CASE ( *, A_ = None , A_ = None , A_ = dataclasses.MISSING , A_ = dataclasses.MISSING , A_ = None , **A_ , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls lowerCAmelCase__ : Dict = {} if aliases is not None: lowerCAmelCase__ : int = aliases if help is not None: lowerCAmelCase__ : Optional[int] = help return dataclasses.field(metadata=A_ , default=A_ , default_factory=A_ , **A_ ) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 42 def __init__( self : Dict ,lowercase_ : Union[DataClassType, Iterable[DataClassType]] ,**lowercase_ : str ): # To make the default appear when using --help if "formatter_class" not in kwargs: lowerCAmelCase__ : Tuple = ArgumentDefaultsHelpFormatter super().__init__(**lowercase_ ) if dataclasses.is_dataclass(lowercase_ ): lowerCAmelCase__ : Tuple = [dataclass_types] lowerCAmelCase__ : List[str] = list(lowercase_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowercase_ ) @staticmethod def __lowerCAmelCase ( lowercase_ : ArgumentParser ,lowercase_ : dataclasses.Field ): lowerCAmelCase__ : Dict = F'--{field.name}' lowerCAmelCase__ : List[str] = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,lowercase_ ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) lowerCAmelCase__ : List[str] = kwargs.pop('''aliases''' ,[] ) if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : Optional[Any] = [aliases] lowerCAmelCase__ : Union[str, Any] = getattr(field.type ,'''__origin__''' ,field.type ) if origin_type is Union or (hasattr(lowercase_ ,'''UnionType''' ) and isinstance(lowercase_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(lowercase_ ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' F' Problem encountered in field \'{field.name}\'.' 
) if type(lowercase_ ) not in field.type.__args__: # filter `str` in Union lowerCAmelCase__ : int = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] lowerCAmelCase__ : List[str] = getattr(field.type ,'''__origin__''' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) lowerCAmelCase__ : Optional[int] = ( field.type.__args__[0] if isinstance(lowercase_ ,field.type.__args__[1] ) else field.type.__args__[1] ) lowerCAmelCase__ : Optional[Any] = getattr(field.type ,'''__origin__''' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) lowerCAmelCase__ : List[Any] = {} if origin_type is Literal or (isinstance(field.type ,lowercase_ ) and issubclass(field.type ,lowercase_ )): if origin_type is Literal: lowerCAmelCase__ : Union[str, Any] = field.type.__args__ else: lowerCAmelCase__ : Optional[Any] = [x.value for x in field.type] lowerCAmelCase__ : List[str] = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: lowerCAmelCase__ : int = field.default else: lowerCAmelCase__ : Any = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument lowerCAmelCase__ : List[Any] = copy(lowercase_ ) # Hack because type=bool in argparse does not behave as we want. lowerCAmelCase__ : Tuple = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. lowerCAmelCase__ : List[Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way lowerCAmelCase__ : Tuple = default # This tells argparse we accept 0 or 1 value after --field_name lowerCAmelCase__ : Union[str, Any] = '''?''' # This is the value that will get picked if we do --field_name (without value) lowerCAmelCase__ : Any = True elif isclass(lowercase_ ) and issubclass(lowercase_ ,lowercase_ ): lowerCAmelCase__ : List[str] = field.type.__args__[0] lowerCAmelCase__ : str = '''+''' if field.default_factory is not dataclasses.MISSING: lowerCAmelCase__ : Dict = field.default_factory() elif field.default is dataclasses.MISSING: lowerCAmelCase__ : str = True else: lowerCAmelCase__ : List[Any] = field.type if field.default is not dataclasses.MISSING: lowerCAmelCase__ : str = field.default elif field.default_factory is not dataclasses.MISSING: lowerCAmelCase__ : Any = field.default_factory() else: lowerCAmelCase__ : Optional[Any] = True parser.add_argument(lowercase_ ,*lowercase_ ,**lowercase_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): lowerCAmelCase__ : Optional[Any] = False parser.add_argument(F'--no_{field.name}' ,action='''store_false''' ,dest=field.name ,**lowercase_ ) def __lowerCAmelCase ( self : str ,lowercase_ : DataClassType ): if hasattr(lowercase_ ,'''_argument_group_name''' ): lowerCAmelCase__ : Optional[int] = self.add_argument_group(dtype._argument_group_name ) else: lowerCAmelCase__ : List[str] = self try: lowerCAmelCase__ : Dict[str, type] = get_type_hints(lowercase_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(lowercase_ ): lowerCAmelCase__ : int = '''.'''.join(map(lowercase_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. Try removing ' '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(lowercase_ ): if not field.init: continue lowerCAmelCase__ : Any = type_hints[field.name] self._parse_dataclass_field(lowercase_ ,lowercase_ ) def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[Any]=None ,lowercase_ : str=False ,lowercase_ : str=True ,lowercase_ : Any=None ,lowercase_ : List[str]=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): lowerCAmelCase__ : int = [] if args_filename: args_files.append(Path(lowercase_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values lowerCAmelCase__ : List[str] = ArgumentParser() args_file_parser.add_argument(lowercase_ ,type=lowercase_ ,action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = args_file_parser.parse_known_args(args=lowercase_ ) lowerCAmelCase__ : int = vars(lowercase_ ).get(args_file_flag.lstrip('''-''' ) ,lowercase_ ) if cmd_args_file_paths: args_files.extend([Path(lowercase_ ) for p in cmd_args_file_paths] ) lowerCAmelCase__ : Tuple = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last lowerCAmelCase__ : Dict = file_args + args if args is not None else file_args + sys.argv[1:] lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = self.parse_known_args(args=lowercase_ ) lowerCAmelCase__ : Optional[Any] = [] for dtype in self.dataclass_types: lowerCAmelCase__ : int = {f.name for f in dataclasses.fields(lowercase_ ) if f.init} lowerCAmelCase__ : int = {k: v for k, v in vars(lowercase_ ).items() if k in keys} for k in keys: delattr(lowercase_ ,lowercase_ ) lowerCAmelCase__ : 
Optional[Any] = dtype(**lowercase_ ) outputs.append(lowercase_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(lowercase_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def __lowerCAmelCase ( self : Any ,lowercase_ : Dict[str, Any] ,lowercase_ : bool = False ): lowerCAmelCase__ : List[Any] = set(args.keys() ) lowerCAmelCase__ : Any = [] for dtype in self.dataclass_types: lowerCAmelCase__ : Optional[Any] = {f.name for f in dataclasses.fields(lowercase_ ) if f.init} lowerCAmelCase__ : Union[str, Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) lowerCAmelCase__ : Union[str, Any] = dtype(**lowercase_ ) outputs.append(lowercase_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(lowercase_ )}' ) return tuple(lowercase_ ) def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : str ,lowercase_ : bool = False ): with open(Path(lowercase_ ) ,encoding='''utf-8''' ) as open_json_file: lowerCAmelCase__ : Union[str, Any] = json.loads(open_json_file.read() ) lowerCAmelCase__ : List[str] = self.parse_dict(lowercase_ ,allow_extra_keys=lowercase_ ) return tuple(lowercase_ ) def __lowerCAmelCase ( self : Dict ,lowercase_ : str ,lowercase_ : bool = False ): lowerCAmelCase__ : Tuple = self.parse_dict(yaml.safe_load(Path(lowercase_ ).read_text() ) ,allow_extra_keys=lowercase_ ) return tuple(lowercase_ )
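The error messages above identify this class as transformers' HfArgumentParser; a minimal usage sketch under that assumption, with a made-up dataclass. Each dataclass field becomes a command-line flag, and parse_args_into_dataclasses returns populated instances.

from dataclasses import dataclass, field


@dataclass
class ExampleArguments:
    learning_rate: float = field(default=5e-5, metadata={"help": "Initial learning rate."})
    num_epochs: int = 3
    use_fp16: bool = False  # bools with a False default behave like store_true switches


parser = HfArgumentParser(ExampleArguments)
(example_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--use_fp16"])
print(example_args.learning_rate, example_args.num_epochs, example_args.use_fp16)  # 0.0001 3 True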
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def lowercase ( a__ : List[Any] ) -> Optional[int]: _UpperCamelCase = {} _UpperCamelCase = job['''started_at'''] _UpperCamelCase = job['''completed_at'''] _UpperCamelCase = date_parser.parse(a__ ) _UpperCamelCase = date_parser.parse(a__ ) _UpperCamelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) _UpperCamelCase = start _UpperCamelCase = end _UpperCamelCase = duration_in_min return job_info def lowercase ( a__ : str , a__ : Dict=None ) -> Optional[int]: _UpperCamelCase = None if token is not None: _UpperCamelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''} _UpperCamelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _UpperCamelCase = requests.get(a__ , headers=a__ ).json() _UpperCamelCase = {} try: job_time.update({job['''name''']: extract_time_from_single_job(a__ ) for job in result['''jobs''']} ) _UpperCamelCase = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(a__ ): _UpperCamelCase = requests.get(url + F'''&page={i + 2}''' , headers=a__ ).json() job_time.update({job['''name''']: extract_time_from_single_job(a__ ) for job in result['''jobs''']} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") UpperCAmelCase = parser.parse_args() UpperCAmelCase = get_job_time(args.workflow_run_id) UpperCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'''{k}: {v['duration']}''')
"""simple docstring""" def lowercase ( a__ : float , a__ : float ) -> float: if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse lowerCAmelCase__ = '''docs/source/_static/js/custom.js''' def snake_case_ ( A_ : List[str] ): '''simple docstring''' with open(A_, encoding='''utf-8''', newline='''\n''' ) as f: _lowerCamelCase : int = f.readlines() _lowerCamelCase : List[str] = 0 # First let's put the right version while not lines[index].startswith('''const stableVersion =''' ): index += 1 _lowerCamelCase : List[Any] = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith('''const versionMapping = {''' ): index += 1 # We go until the end while not lines[index].startswith('''}''' ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(A_, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.writelines(A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') lowerCAmelCase__ = parser.parse_args() update_custom_js(args.version)
"""simple docstring""" def snake_case_ ( A_ : int ): '''simple docstring''' return 1 if digit in (0, 1) else (digit * factorial(digit - 1 )) def snake_case_ ( A_ : int ): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : Any = number while duplicate > 0: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = divmod(A_, 10 ) fact_sum += factorial(A_ ) return fact_sum == number if __name__ == "__main__": print('''Program to check whether a number is a Krisnamurthy Number or not.''') lowerCAmelCase__ = int(input('''Enter number: ''').strip()) print( F"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.""" )
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Walk the binary state space tree with DFS: at each index, either skip the
    element or take it, and print the subsequence once the end is reached.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
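For the first call above, the skip branch recurses before the take branch, so the empty subsequence prints first and the full sequence prints last, 2^4 = 16 lines in total:

# []
# [4]
# [2]
# [2, 4]
# [1]
# [1, 4]
# [1, 2]
# [1, 2, 4]
# [3]
# ...
# [3, 1, 2, 4]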
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad to max_length so every candidate in a batch shares one
        # sequence length and the encodings can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
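# Editor's usage sketch (hedged: mirrors the upstream docstring example and
# needs network access to fetch the checkpoint, so it is left commented out):
#
#     from transformers import RealmTokenizerFast
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#     # batch["input_ids"] has shape (batch_size, num_candidates, max_length) == (2, 2, 10)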
50
1
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Always apply the merge with the lowest rank (i.e. learned earliest).
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
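# Editor's illustration (standalone toy, not part of the tokenizer): the bpe()
# method above greedily applies the lowest-ranked merge from merges.txt until
# none applies. The same mechanic on an assumed toy rank table:
if __name__ == "__main__":
    toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
    word = ["l", "o", "w"]
    while True:
        pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
        ranked = [p for p in pairs if p in toy_ranks]
        if not ranked:
            break
        first, second = min(ranked, key=lambda pair: toy_ranks[pair])
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(word[i] + word[i + 1])  # apply the chosen merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    assert word == ["low"]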
262
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class __A ( unittest.TestCase ): def _snake_case ( self ): lowerCamelCase =Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) lowerCamelCase =Vector() def _snake_case ( self ): lowerCamelCase =Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(UpperCAmelCase_ ) , """(0,0,0,0,0,1)""" ) def _snake_case ( self ): lowerCamelCase =Vector([1, 2, 3, 4] ) self.assertEqual(len(UpperCAmelCase_ ) , 4 ) def _snake_case ( self ): lowerCamelCase =Vector([1, 2] ) lowerCamelCase =Vector([1, 2, 3, 4, 5] ) lowerCamelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) lowerCamelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 ) def _snake_case ( self ): lowerCamelCase =Vector([1, 2, 3] ) lowerCamelCase =Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def _snake_case ( self ): lowerCamelCase =Vector([1, 2, 3] ) lowerCamelCase =Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def _snake_case ( self ): lowerCamelCase =Vector([1, 2, 3] ) lowerCamelCase =Vector([2, -1, 4] ) # for test of dot product lowerCamelCase =Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def _snake_case ( self ): self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def _snake_case ( self ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def _snake_case ( self ): lowerCamelCase =Vector([1, 2, 3] ) lowerCamelCase =Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , UpperCAmelCase_ , UpperCAmelCase_ ) ) , """(3,4,7)""" ) def _snake_case ( self ): lowerCamelCase =Vector([1, 0, 0, 0, 0, 0] ) lowerCamelCase =x.copy() self.assertEqual(str(UpperCAmelCase_ ) , str(UpperCAmelCase_ ) ) def _snake_case ( self ): lowerCamelCase =Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(UpperCAmelCase_ ) , """(0,1,0)""" ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCamelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCamelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) lowerCamelCase =Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def _snake_case ( self ): 
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def _snake_case ( self ): lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def _snake_case ( self ): self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
262
1
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.getLogger(__name__) def lowerCAmelCase_ ( ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=snake_case_ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=snake_case_ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=snake_case_ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=snake_case_ , default=10_00 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=snake_case_ , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=snake_case_ , type=snake_case_ , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=snake_case_ , default=5_12 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=snake_case_ , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." , ) UpperCAmelCase_ = parser.parse_args() return args def lowerCAmelCase_ ( snake_case_ : Dict ) -> str: '''simple docstring''' def fn(snake_case_ : Optional[Any] ): return tokenizer(examples["text"] ) return fn def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = [] for i in range(len(tokenized_data["input_ids"] ) ): UpperCAmelCase_ = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } UpperCAmelCase_ = tf.train.Features(feature=snake_case_ ) UpperCAmelCase_ = tf.train.Example(features=snake_case_ ) UpperCAmelCase_ = example.SerializeToString() records.append(snake_case_ ) return records def lowerCAmelCase_ ( snake_case_ : List[str] ) -> str: '''simple docstring''' UpperCAmelCase_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase_ = min(len(snake_case_ ) , args.limit ) UpperCAmelCase_ = dataset.select(range(snake_case_ ) ) print(f"""Limiting the dataset to {args.limit} entries.""" ) UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase_ = os.path.join(args.output_dir , args.split ) if not os.path.exists(snake_case_ ): os.makedirs(snake_case_ ) else: UpperCAmelCase_ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
UpperCAmelCase_ = tokenize_function(snake_case_ ) UpperCAmelCase_ = dataset.map(snake_case_ , batched=snake_case_ , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(snake_case_ : str ): # Concatenate all texts. UpperCAmelCase_ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase_ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase_ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase_ = { k: [t[i : i + args.max_length] for i in range(0 , snake_case_ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase_ = dataset_tokenized.map(snake_case_ , batched=snake_case_ , batch_size=10_00 , num_proc=4 ) UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for shard in range(0 , len(snake_case_ ) , args.shard_size ): UpperCAmelCase_ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase_ = len(dataset_snapshot["input_ids"] ) UpperCAmelCase_ = os.path.join(snake_case_ , f"""dataset-{shard_count}-{records_containing}.tfrecord""" ) UpperCAmelCase_ = get_serialized_examples(snake_case_ ) with tf.io.TFRecordWriter(snake_case_ ) as out_file: for i in range(len(snake_case_ ) ): UpperCAmelCase_ = serialized_examples[i] out_file.write(snake_case_ ) print("Wrote file {} containing {} records".format(snake_case_ , snake_case_ ) ) shard_count += 1 total_records += records_containing with open(f"""split-{args.split}-records-count.txt""" , "w" ) as f: print(f"""Total {args.split} records: {total_records}""" , file=snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: List[str] =parse_args() main(args)
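# Editor's note: a hedged sketch (not in the original script) of how the
# shards written above can be read back for training. The feature names match
# the "input_ids"/"attention_mask" int64-list features serialized above; the
# shard filename is hypothetical. Left commented out so the script's behavior
# is unchanged.
#
#     feature_description = {
#         "input_ids": tf.io.VarLenFeature(tf.int64),
#         "attention_mask": tf.io.VarLenFeature(tf.int64),
#     }
#
#     def decode_fn(example):
#         parsed = tf.io.parse_single_example(example, feature_description)
#         return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}
#
#     dataset = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
#     dataset = dataset.map(decode_fn).batch(8)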
1
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = parent _lowerCAmelCase = config_class _lowerCAmelCase = has_text_modality _lowerCAmelCase = kwargs _lowerCAmelCase = common_properties def _snake_case ( self ) -> int: _lowerCAmelCase = self.config_class(**self.inputs_dict ) _lowerCAmelCase = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) , msg=f'''`{prop}` does not exist''' ) # Test that config has the common properties as setter for idx, name in enumerate(_lowerCAmelCase ): try: setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual( getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowerCAmelCase ): try: _lowerCAmelCase = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) _lowerCAmelCase = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase = os.path.join(_lowerCAmelCase , "config.json" ) config_first.to_json_file(_lowerCAmelCase ) _lowerCAmelCase = self.config_class.from_json_file(_lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _snake_case ( self ) -> str: _lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.config_class(**self.inputs_dict ) _lowerCAmelCase = "test" with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) config_first.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase , subfolder=_lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _snake_case ( self ) -> Optional[Any]: 
_lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) _lowerCAmelCase = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def _snake_case ( self ) -> List[Any]: if self.config_class.is_composition: return _lowerCAmelCase = self.config_class() self.parent.assertIsNotNone(_lowerCAmelCase ) def _snake_case ( self ) -> str: _lowerCAmelCase = copy.deepcopy(_lowerCAmelCase ) _lowerCAmelCase = self.config_class(**_lowerCAmelCase ) _lowerCAmelCase = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) ) elif getattr(_lowerCAmelCase , _lowerCAmelCase ) != value: wrong_values.append((key, getattr(_lowerCAmelCase , _lowerCAmelCase ), value) ) if len(_lowerCAmelCase ) > 0: _lowerCAmelCase = "\n".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] ) raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' ) def _snake_case ( self ) -> List[str]: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
158
0
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin A : List[str] = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Optional[Any] = SpeechTaTokenizer __lowerCamelCase : Tuple = False __lowerCamelCase : int = True def a_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(__lowerCAmelCase ) A__ = AddedToken("""<mask>""" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) A__ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def a_ ( self : Tuple , __lowerCAmelCase : Dict ) -> Any: """simple docstring""" A__ = """this is a test""" A__ = """this is a test""" return input_text, output_text def a_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[Any]=20 , __lowerCAmelCase : str=5 ) -> Optional[int]: """simple docstring""" A__ , A__ = self.get_input_output_texts(__lowerCAmelCase ) A__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) return text, ids def a_ ( self : Optional[int] ) -> int: """simple docstring""" A__ = """<pad>""" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(__lowerCAmelCase ) , 81 ) def a_ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def a_ ( self : Any ) -> Any: """simple docstring""" A__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] A__ = tokenizer.add_tokens(__lowerCAmelCase ) A__ = tokenizer.vocab_size A__ = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) ) self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) ) A__ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=__lowerCAmelCase ) self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 ) 
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} A__ = tokenizer.add_special_tokens(__lowerCAmelCase ) A__ = tokenizer.vocab_size A__ = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) ) self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) ) A__ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=__lowerCAmelCase ) self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def a_ ( self : List[str] ) -> int: """simple docstring""" pass def a_ ( self : List[str] ) -> Any: """simple docstring""" pass def a_ ( self : int ) -> List[Any]: """simple docstring""" A__ = self.get_tokenizer() A__ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(__lowerCAmelCase , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __lowerCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) A__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) # fmt: off self.assertListEqual(__lowerCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def a_ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" A__ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off A__ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=__lowerCAmelCase , )
276
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    # Column `col` collects every `key`-th character, starting at offset `col`.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
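# Worked example (editor's note, derived by hand from encrypt_message above):
# encrypt_message(2, "HELLO") reads every 2nd character per column, so
#   column 0 -> "HLO" (indices 0, 2, 4) and column 1 -> "EL" (indices 1, 3),
# giving the ciphertext "HLOEL"; decrypt_message(2, "HLOEL") restores "HELLO".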
276
1
"""simple docstring""" import argparse import os import re import packaging.version _lowercase = '''examples/''' _lowercase = { '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } _lowercase = { '''init''': '''src/diffusers/__init__.py''', '''setup''': '''setup.py''', } _lowercase = '''README.md''' def _snake_case ( snake_case__ : str , snake_case__ : int , snake_case__ : Dict ): with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: A = f.read() A , A = REPLACE_PATTERNS[pattern] A = replace.replace('VERSION' , snake_case__ ) A = re_pattern.sub(snake_case__ , snake_case__ ) with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(snake_case__ ) def _snake_case ( snake_case__ : Tuple ): for folder, directories, fnames in os.walk(snake_case__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' ) def _snake_case ( snake_case__ : List[str] , snake_case__ : List[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(snake_case__ , snake_case__ , snake_case__ ) if not patch: update_version_in_examples(snake_case__ ) def _snake_case ( ): A = '🤗 Transformers currently provides the following architectures' A = '1. Want to contribute a new model?' with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: A = f.readlines() # Find the start of the list. A = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 A = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): A = lines[index].replace( 'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , ) index += 1 with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(snake_case__ ) def _snake_case ( ): with open(REPLACE_FILES['init'] , 'r' ) as f: A = f.read() A = REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0] return packaging.version.parse(snake_case__ ) def _snake_case ( snake_case__ : Dict=False ): A = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: A = default_version.base_version elif patch: A = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: A = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. A = input(F'Which version are you releasing? [{default_version}]' ) if len(snake_case__ ) == 0: A = default_version print(F'Updating version to {version}.' 
) global_version_update(snake_case__ , patch=snake_case__ ) def _snake_case ( ): A = get_version() A = F'{current_version.major}.{current_version.minor + 1}.0.dev0' A = current_version.base_version # Check with the user we got that right. A = input(F'Which version are we developing now? [{dev_version}]' ) if len(snake_case__ ) == 0: A = dev_version print(F'Updating version to {version}.' ) global_version_update(snake_case__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') _lowercase = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
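# Editor's note: typical invocations (the utils/ path is an assumption about
# where this script lives in the repository):
#
#     python utils/release.py                  # bump the minor version before a release
#     python utils/release.py --patch          # cut a patch release from a release branch
#     python utils/release.py --post_release   # move the branch back to a .dev0 version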
74
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _lowercase = logging.get_logger(__name__) _lowercase = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = '''dpt''' def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]: super().__init__(**A_ ) A = hidden_size A = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): logger.info('Initializing the config with a `BiT` backbone.' ) A = BitConfig(**A_ ) elif isinstance(A_ ,A_ ): A = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) A = backbone_featmap_shape A = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: A = None A = None A = [] A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) A = readout_type A = reassemble_factors A = neck_hidden_sizes A = fusion_hidden_size A = head_in_index A = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A = use_auxiliary_head A = auxiliary_loss_weight A = semantic_loss_ignore_index A = semantic_classifier_dropout def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A = self.backbone_config.to_dict() A = self.__class__.model_type return output
74
1
def bead_sort(sequence: list) -> list:
    """Bead ("gravity") sort for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # Let the excess "beads" fall from the upper rod to the lower one.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
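# Editor's trace (illustrative): one sweep of the inner loop moves beads down
# between adjacent rods. For [5, 4]: rod_upper=5 > rod_lower=4, so one bead
# transfers and the pair becomes [4, 5]. The len(sequence) outer sweeps
# guarantee every bead settles, for O(n^2) comparisons overall.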
107
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
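# Editor's note: expected output of pretty_print(3), reconstructed by hand
# (each row carries a trailing space from the "* " prints):
#
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *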
107
1
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}

    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]

        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
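# Editor's demo: the classic healthy/fever HMM with the standard textbook
# numbers (the probabilities are not taken from this file). Verified by hand:
# the most likely state sequence for normal -> cold -> dizzy is
# Healthy -> Healthy -> Fever.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    assert viterbi(observations, states, start_p, trans_p, emit_p) == [
        "Healthy",
        "Healthy",
        "Fever",
    ]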
175
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
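# Editor's demo (hedged: the relative imports above mean this file only runs
# from inside the transformers package, so the snippet is left commented out):
#
#     from transformers import MegatronBertConfig
#
#     config = MegatronBertConfig(num_hidden_layers=2, hidden_size=64)
#     assert (config.num_hidden_layers, config.hidden_size) == (2, 64)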
175
1
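# Illustrative driver for the Viterbi routine above (a hedged sketch: the flattened
# dump obfuscates the function's name; upstream, in TheAlgorithms, it is `viterbi`,
# which is assumed here). The toy HMM values below are hypothetical example data.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, initial, transition, emission))
# expected most likely state sequence: ['Healthy', 'Healthy', 'Fever']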
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging snake_case : Optional[int] = logging.get_logger(__name__) logging.set_verbosity_info() def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str ): if "xprophetnet" in prophetnet_checkpoint_path: a__ = XLMProphetNetForConditionalGenerationOld.from_pretrained(__lowerCAmelCase ) a__ , a__ = XLMProphetNetForConditionalGeneration.from_pretrained( __lowerCAmelCase , output_loading_info=__lowerCAmelCase ) else: a__ = ProphetNetForConditionalGenerationOld.from_pretrained(__lowerCAmelCase ) a__ , a__ = ProphetNetForConditionalGeneration.from_pretrained( __lowerCAmelCase , output_loading_info=__lowerCAmelCase ) a__ = ['key_proj', 'value_proj', 'query_proj'] a__ = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: a__ = key.split('.' ) if attributes[0] == "lm_head": a__ = prophet a__ = prophet_old else: a__ = prophet.prophetnet a__ = prophet_old.model a__ = False for attribute in attributes: if attribute in mapping: a__ = mapping[attribute] if not hasattr(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) > 0: a__ = attribute elif hasattr(__lowerCAmelCase , __lowerCAmelCase ): a__ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" a__ = old_model.weight logger.info(F'{attribute} is initialized.' ) a__ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
a__ = old_model.bias logger.info(F'{attribute} is initialized' ) a__ = True break elif attribute in special_keys and hasattr(__lowerCAmelCase , 'in_proj_weight' ): embed_dim = old_model.in_proj_weight.shape[0] // 3 param = getattr(model , attribute ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": a__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) a__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": a__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) a__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": a__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) a__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) a__ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." a__ = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) a__ = True break if attribute.isdigit(): a__ = model[int(__lowerCAmelCase )] a__ = old_model[int(__lowerCAmelCase )] else: a__ = getattr(__lowerCAmelCase , __lowerCAmelCase ) if old_attribute == "": a__ = old_model else: if not hasattr(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(F'{old_model} does not have {old_attribute}' ) a__ = getattr(__lowerCAmelCase , __lowerCAmelCase ) if not is_key_init: raise ValueError(F'{key} was not correctly initialized!' ) print(F'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": snake_case : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) snake_case : Tuple = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
109
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class snake_case_ (unittest.TestCase ): def lowerCamelCase__( self :List[Any] ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase__( self :int ) -> Optional[Any]: a__ , a__ = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ = controlnet_params a__ = 'bird' a__ = jax.device_count() a__ = pipe.prepare_text_inputs([prompts] * num_samples ) a__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) a__ = pipe.prepare_image_inputs([canny_image] * num_samples ) a__ = jax.random.PRNGKey(0 ) a__ = jax.random.split(__snake_case ,jax.device_count() ) a__ = replicate(__snake_case ) a__ = shard(__snake_case ) a__ = shard(__snake_case ) a__ = pipe( prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a__ = images[0, 2_53:2_56, 2_53:2_56, -1] a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ = jnp.array( [0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]: a__ , a__ = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa ) a__ = controlnet_params a__ = 'Chef in the kitchen' a__ = jax.device_count() a__ = pipe.prepare_text_inputs([prompts] * num_samples ) a__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) a__ = pipe.prepare_image_inputs([pose_image] * num_samples ) a__ = jax.random.PRNGKey(0 ) a__ = jax.random.split(__snake_case ,jax.device_count() ) a__ = replicate(__snake_case ) a__ = shard(__snake_case ) a__ = shard(__snake_case ) a__ = pipe( prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a__ = images[0, 2_53:2_56, 2_53:2_56, -1] a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ = jnp.array( [[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
109
1
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
262
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _UpperCAmelCase : Optional[int] ="""src/transformers""" _UpperCAmelCase : str ="""docs/source/en""" _UpperCAmelCase : Optional[int] =""".""" def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]: with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase_ : int = f.readlines() # Find the start prompt. lowerCAmelCase_ : List[Any] = 0 while not lines[start_index].startswith(lowerCAmelCase_ ): start_index += 1 start_index += 1 lowerCAmelCase_ : List[str] = start_index while not lines[end_index].startswith(lowerCAmelCase_ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _UpperCAmelCase : Optional[Any] ="""Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. _UpperCAmelCase : Optional[int] =re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") _UpperCAmelCase : Dict =re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _UpperCAmelCase : Optional[Any] =re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. _UpperCAmelCase : Optional[int] =direct_transformers_import(TRANSFORMERS_PATH) def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]: lowerCAmelCase_ : str = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase_ ) return [m.group(0 ) for m in matches] def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]: lowerCAmelCase_ : Tuple = 2 if text == '''✅''' or text == '''❌''' else len(lowerCAmelCase_ ) lowerCAmelCase_ : int = (width - text_length) // 2 lowerCAmelCase_ : Union[str, Any] = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCAmelCase ( )-> str: lowerCAmelCase_ : Any = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowerCAmelCase_ : Dict = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowerCAmelCase_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowerCAmelCase_ : Tuple = collections.defaultdict(lowerCAmelCase_ ) lowerCAmelCase_ : List[str] = collections.defaultdict(lowerCAmelCase_ ) lowerCAmelCase_ : Optional[Any] = collections.defaultdict(lowerCAmelCase_ ) lowerCAmelCase_ : Optional[int] = collections.defaultdict(lowerCAmelCase_ ) lowerCAmelCase_ : List[str] = collections.defaultdict(lowerCAmelCase_ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(lowerCAmelCase_ ): lowerCAmelCase_ : Optional[int] = None if attr_name.endswith('''Tokenizer''' ): lowerCAmelCase_ : Union[str, Any] = slow_tokenizers lowerCAmelCase_ : List[str] = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): lowerCAmelCase_ : int = fast_tokenizers lowerCAmelCase_ : Union[str, Any] = attr_name[:-13] elif _re_tf_models.match(lowerCAmelCase_ ) is not None: lowerCAmelCase_ : Tuple = tf_models lowerCAmelCase_ : str = _re_tf_models.match(lowerCAmelCase_ ).groups()[0] elif _re_flax_models.match(lowerCAmelCase_ ) is not None: lowerCAmelCase_ : Tuple = flax_models lowerCAmelCase_ : Union[str, Any] = _re_flax_models.match(lowerCAmelCase_ ).groups()[0] elif _re_pt_models.match(lowerCAmelCase_ ) is not None: lowerCAmelCase_ : Any = pt_models lowerCAmelCase_ : List[Any] = _re_pt_models.match(lowerCAmelCase_ ).groups()[0] if lookup_dict is not None: while len(lowerCAmelCase_ ) > 0: if attr_name in model_name_to_prefix.values(): lowerCAmelCase_ : Union[str, Any] = True break # Try again after removing the last word in the name lowerCAmelCase_ : Any = ''''''.join(camel_case_split(lowerCAmelCase_ )[:-1] ) # Let's build that table! lowerCAmelCase_ : int = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowerCAmelCase_ : Tuple = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowerCAmelCase_ : Union[str, Any] = [len(lowerCAmelCase_ ) + 2 for c in columns] lowerCAmelCase_ : Optional[Any] = max([len(lowerCAmelCase_ ) for name in model_names] ) + 2 # Build the table per se lowerCAmelCase_ : Dict = '''|''' + '''|'''.join([_center_text(lowerCAmelCase_ , lowerCAmelCase_ ) for c, w in zip(lowerCAmelCase_ , lowerCAmelCase_ )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" lowerCAmelCase_ : List[str] = {True: '''✅''', False: '''❌'''} for name in model_names: lowerCAmelCase_ : List[Any] = model_name_to_prefix[name] lowerCAmelCase_ : Union[str, Any] = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(lowerCAmelCase_ , lowerCAmelCase_ ) for l, w in zip(lowerCAmelCase_ , lowerCAmelCase_ )] ) + "|\n" return table def lowerCAmelCase ( lowerCAmelCase_=False )-> Tuple: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = _find_text_in_file( filename=os.path.join(lowerCAmelCase_ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) lowerCAmelCase_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(lowerCAmelCase_ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. 
Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": _UpperCAmelCase : List[Any] =argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _UpperCAmelCase : Tuple =parser.parse_args() check_model_table(args.fix_and_overwrite)
262
1
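# Self-contained sketch of the camel-case splitter defined in the check_table
# snippet above (its name is obfuscated in the dump; upstream calls it
# `camel_case_split`). Same regex: it cuts at lower->UPPER and UPPER->UPPERlower
# boundaries.
import re

def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

print(camel_case_split("TFBertForMaskedLM"))  # ['TF', 'Bert', 'For', 'Masked', 'LM']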
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _UpperCamelCase ( A ): '''simple docstring''' lowerCAmelCase__ = ["""image_processor""", """tokenizer"""] lowerCAmelCase__ = """FlavaImageProcessor""" lowerCAmelCase__ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Optional[Any] , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : int): '''simple docstring''' __lowercase =None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _lowerCAmelCase , ) __lowercase =kwargs.pop('feature_extractor') __lowercase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(_lowerCAmelCase , _lowerCAmelCase) __lowercase =self.image_processor def __call__( self : List[Any] , _lowerCAmelCase : Optional[ImageInput] = None , _lowerCAmelCase : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : List[str] , ): '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. 
Both cannot be none.') if text is not None: __lowercase =self.tokenizer( text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , ) if images is not None: __lowercase =self.image_processor( _lowerCAmelCase , return_image_mask=_lowerCAmelCase , return_codebook_pixels=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , ) if text is not None and images is not None: encoding.update(_lowerCAmelCase) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase) , tensor_type=_lowerCAmelCase) def __lowerCamelCase ( self : str , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int]): '''simple docstring''' return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase) def __lowerCamelCase ( self : Optional[int] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[int]): '''simple docstring''' return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase) @property def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =self.tokenizer.model_input_names __lowercase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def __lowerCamelCase ( self : List[str]): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCAmelCase , ) return self.image_processor_class @property def __lowerCamelCase ( self : List[str]): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCAmelCase , ) return self.image_processor
48
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCamelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class _UpperCamelCase ( A ): '''simple docstring''' def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[int]=1): '''simple docstring''' __lowercase =tokenizer __lowercase =dataset __lowercase =len(_lowerCAmelCase) if n_tasks is None else n_tasks __lowercase =n_copies def __iter__( self : Union[str, Any]): '''simple docstring''' __lowercase =[] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip()) __lowercase =self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='pt') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class _UpperCamelCase ( A ): '''simple docstring''' def __init__( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any]): '''simple docstring''' __lowercase =start_length __lowercase =eof_strings __lowercase =tokenizer def __call__( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any): '''simple docstring''' __lowercase =self.tokenizer.batch_decode(input_ids[:, self.start_length :]) __lowercase =[] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(_lowerCAmelCase) def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase =re.split('(%s)' % '|'.join(_lowerCAmelCase ) , _lowerCAmelCase ) # last string should be "" return "".join(string_list[:-2] ) def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=20 , **_lowerCAmelCase ): """simple docstring""" __lowercase =defaultdict(_lowerCAmelCase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowerCAmelCase ) ): with torch.no_grad(): __lowercase =batch['ids'].shape[-1] __lowercase =accelerator.unwrap_model(_lowerCAmelCase ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCAmelCase , **_lowerCAmelCase ) # each task is generated batch_size times __lowercase =batch['task_id'].repeat(_lowerCAmelCase ) __lowercase =accelerator.pad_across_processes( _lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id ) __lowercase , __lowercase =accelerator.gather((generated_tokens, generated_tasks) ) __lowercase =generated_tokens.cpu().numpy() __lowercase =generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowerCAmelCase , _lowerCAmelCase ): gen_token_dict[task].append(_lowerCAmelCase ) __lowercase =[[] for _ in range(_lowerCAmelCase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: __lowercase 
=tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) code_gens[task].append(remove_last_block(_lowerCAmelCase ) ) return code_gens def _A ( ): """simple docstring""" __lowercase =HfArgumentParser(_lowerCAmelCase ) __lowercase =parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric __lowercase =args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing __lowercase ='false' if args.num_workers is None: __lowercase =multiprocessing.cpu_count() # Use dataset load to feed to accelerate __lowercase =Accelerator() set_seed(args.seed , device_specific=_lowerCAmelCase ) # Load model and tokenizer __lowercase =AutoTokenizer.from_pretrained(args.model_ckpt ) __lowercase =tokenizer.eos_token __lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings __lowercase ={ 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCAmelCase , _lowerCAmelCase )] ), } # Load evaluation dataset and metric __lowercase =load_dataset('openai_humaneval' ) __lowercase =load_metric('code_eval' ) __lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) __lowercase =args.n_samples // args.batch_size __lowercase =TokenizedDataset(_lowerCAmelCase , human_eval['test'] , n_copies=_lowerCAmelCase , n_tasks=_lowerCAmelCase ) # do not confuse args.batch_size, which is actually the num_return_sequences __lowercase =DataLoader(_lowerCAmelCase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: __lowercase =code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception __lowercase , __lowercase =accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase ) __lowercase =complete_code( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , n_tasks=_lowerCAmelCase , batch_size=args.batch_size , **_lowerCAmelCase , ) if accelerator.is_main_process: __lowercase =[] for task in tqdm(range(_lowerCAmelCase ) ): __lowercase =human_eval['test'][task]['test'] __lowercase =f"""check({human_eval['test'][task]['entry_point']})""" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric __lowercase , __lowercase =code_eval_metric.compute( references=_lowerCAmelCase , predictions=_lowerCAmelCase , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
48
1
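# Self-contained sketch of the stop-string truncation used in the HumanEval
# snippet above (upstream name `remove_last_block`, assumed here): the text is
# split on the EOF markers and the final marker plus everything after it is dropped.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string: str) -> str:
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])  # last separator and trailing chunk are discarded

print(remove_last_block("def f():\n    return 1\nclass Foo:\n    pass"))
# -> def f():
#        return 1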
'''simple docstring''' from datetime import datetime import requests from bs4 import BeautifulSoup if __name__ == "__main__": url = input('''Enter image url: ''').strip() print(F"Downloading image from {url} ...") soup = BeautifulSoup(requests.get(url).content, '''html.parser''') # The image URL is in the content field of the first meta tag with property og:image image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content'''] image_data = requests.get(image_url).content file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg" with open(file_name, '''wb''') as fp: fp.write(image_data) print(F"Done. Image saved to disk as {file_name}.")
276
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str ) -> Optional[int]: '''simple docstring''' _a , _a : List[str] =text, pattern _a , _a : Union[str, Any] =len(SCREAMING_SNAKE_CASE ), len(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int ) -> int: '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def __UpperCAmelCase ( self :Union[str, Any] ) -> list[int]: '''simple docstring''' # searches pattern in text and returns index positions _a : Union[str, Any] =[] for i in range(self.textLen - self.patLen + 1 ): _a : Any =self.mismatch_in_text(SCREAMING_SNAKE_CASE ) if mismatch_index == -1: positions.append(SCREAMING_SNAKE_CASE ) else: _a : int =self.match_in_pattern(self.text[mismatch_index] ) _a : List[str] =( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions A__: Any = '''ABAABA''' A__: int = '''AB''' A__: Optional[int] = BoyerMooreSearch(text, pattern) A__: Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
276
1
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class lowerCAmelCase_ : '''simple docstring''' @staticmethod def UpperCamelCase__ ( *_UpperCAmelCase , **_UpperCAmelCase ): pass def hashimage (image )-> str: """simple docstring""" m = hashlib.md5(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' __snake_case = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): snake_case_ = DepthEstimationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ): snake_case_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _UpperCAmelCase ) import datasets snake_case_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case_ = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , _UpperCAmelCase , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCamelCase__ ( self ): pass @slow @require_torch def UpperCamelCase__ ( self ): snake_case_ = '''Intel/dpt-large''' snake_case_ = pipeline('''depth-estimation''' , model=_UpperCAmelCase ) snake_case_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) snake_case_ = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 ) @require_torch def UpperCamelCase__ ( self ): # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
353
# Function to print upper half of diamond (pyramid) def floyd (n )-> None: """simple docstring""" for i in range(0 , n ): for _ in range(0 , n - i - 1 ): # printing spaces print(''' ''' , end='''''' ) for _ in range(0 , i + 1 ): # printing stars print('''* ''' , end='''''' ) print() def reverse_floyd (n )-> None: """simple docstring""" for i in range(n , 0 , -1 ): for _ in range(i , 0 , -1 ): # printing stars print('''* ''' , end='''''' ) print() for _ in range(n - i + 1 , 0 , -1 ): # printing spaces print(''' ''' , end='''''' ) def pretty_print (n )-> None: """simple docstring""" if n <= 0: print(''' ... .... nothing printing :(''' ) return floyd(n ) # upper half reverse_floyd(n ) # lower half if __name__ == "__main__": print(r"""| /\ | |- | |- |--| |\ /| |-""") print(r"""|/ \| |- |_ |_ |__| | \/ | |_""") K = 1 while K: user_number = int(input("""enter the number and , and see the magic : """)) print() pretty_print(user_number) K = int(input("""press 0 to exit... and 1 to continue...""")) print("""Good Bye...""")
267
0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar __lowerCAmelCase : str = TypeVar('T') class snake_case__ (Generic[T] ): """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : bool = True ) -> None: a = {} # dictionary of lists a = directed def __UpperCAmelCase ( self : str , __lowerCamelCase : T , __lowerCamelCase : T ) -> GraphAdjacencyList[T]: if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) self.adj_list[destination_vertex].append(__lowerCamelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) a = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(__lowerCamelCase ) a = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: a = [destination_vertex] a = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) a = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: a = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: a = [destination_vertex] a = [] return self def __repr__( self : Tuple ) -> str: return pformat(self.adj_list )
107
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = GPTSwaTokenizer SCREAMING_SNAKE_CASE_ : int = False SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : Any = False def __UpperCAmelCase ( self : Tuple ) -> Any: super().setUp() # We have a SentencePiece fixture for testing a = GPTSwaTokenizer(__lowerCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Any: a = "This is a test" a = "This is a test" return input_text, output_text def __UpperCAmelCase ( self : List[Any] ) -> List[str]: a = "<s>" a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> int: a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__lowerCamelCase ) , 20_00 ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 20_00 ) def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: a = GPTSwaTokenizer(__lowerCamelCase ) a = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] ) a = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( __lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , ) a = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) # fmt: off self.assertListEqual( __lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def __UpperCAmelCase ( self : List[Any] ) -> str: a = GPTSwaTokenizer(__lowerCamelCase ) a = ["This is a test", "I was born in 92000, and this is falsé."] a = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertListEqual(tokenizer.encode_fast(__lowerCamelCase ) , __lowerCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(tokenizer.decode_fast(__lowerCamelCase ) , __lowerCamelCase ) @slow def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off a = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__lowerCamelCase , )
107
1
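# Hypothetical usage of the adjacency-list graph above (the dump obfuscates the
# names; upstream uses GraphAdjacencyList and add_edge, assumed here). add_edge
# returns self, so calls chain:
g = GraphAdjacencyList(directed=False)
g.add_edge(1, 2).add_edge(2, 3)
print(g)  # {1: [2], 2: [1, 3], 3: [2]}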
"""simple docstring""" def a__ ( __SCREAMING_SNAKE_CASE = 5_0 ) -> Union[str, Any]: __lowerCAmelCase: Dict = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'''{solution() = }''')
369
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class snake_case ( __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states""" @property def lowercase_ ( self : Dict)-> str: '''simple docstring''' __lowerCAmelCase: str = 4 __lowerCAmelCase: int = 8 __lowerCAmelCase: int = 7 __lowerCAmelCase: str = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Optional[Any] = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Any = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str=0)-> str: '''simple docstring''' torch.manual_seed(UpperCamelCase__) __lowerCAmelCase: List[Any] = 4 __lowerCAmelCase: Dict = 8 __lowerCAmelCase: int = 7 __lowerCAmelCase: List[str] = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Tuple = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowercase_ ( self : Dict)-> List[Any]: '''simple docstring''' return (4, 8) @property def lowercase_ ( self : Optional[int])-> int: '''simple docstring''' return (4, 8) def lowercase_ ( self : Optional[int])-> Tuple: '''simple docstring''' __lowerCAmelCase: str = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } __lowerCAmelCase: Any = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : List[Any])-> int: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[int] = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy" , output_loading_info=UpperCamelCase__) self.assertIsNotNone(UpperCamelCase__) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(UpperCamelCase__) __lowerCAmelCase: Dict = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def lowercase_ ( self : List[str])-> Tuple: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.prepare_init_args_and_inputs_for_common() __lowerCAmelCase: Tuple = self.model_class(**UpperCamelCase__) __lowerCAmelCase: List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase: List[Any] = [*signature.parameters.keys()] __lowerCAmelCase: Any = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2] , UpperCamelCase__) def lowercase_ ( self : Optional[int])-> List[str]: '''simple docstring''' __lowerCAmelCase: int = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") __lowerCAmelCase: Union[str, Any] = model.to(UpperCamelCase__) if hasattr(UpperCamelCase__ , "set_default_attn_processor"): 
model.set_default_attn_processor() __lowerCAmelCase: str = self.get_dummy_seed_input() with torch.no_grad(): __lowerCAmelCase: Dict = model(**UpperCamelCase__)[0] __lowerCAmelCase: Dict = output[0, :5].flatten().cpu() print(UpperCamelCase__) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. __lowerCAmelCase: List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2)) @slow class snake_case ( unittest.TestCase ): def lowercase_ ( self : int , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : int=7_7 , UpperCamelCase__ : Any=0)-> Union[str, Any]: '''simple docstring''' torch.manual_seed(UpperCamelCase__) __lowerCAmelCase: List[Any] = batch_size __lowerCAmelCase: Any = embedding_dim __lowerCAmelCase: Dict = num_embeddings __lowerCAmelCase: Dict = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: str = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: int = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : List[Any])-> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ]) def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int)-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[str] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior") model.to(UpperCamelCase__) __lowerCAmelCase: Dict = self.get_dummy_seed_input(seed=UpperCamelCase__) with torch.no_grad(): __lowerCAmelCase: Optional[Any] = model(**UpperCamelCase__)[0] assert list(sample.shape) == [1, 7_6_8] __lowerCAmelCase: Dict = sample[0, :8].flatten().cpu() print(UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = torch.tensor(UpperCamelCase__) assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3)
108
0
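# Worked check for the tiling counter defined earlier in this section (Project
# Euler 116): a row of length 5 admits 7 replacements with red (length-2) tiles,
# 3 with green (length-3) and 2 with blue (length-4), so solution(5) == 12,
# matching the example in the problem statement.
print(solution(5))  # 12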
"""simple docstring""" from collections import defaultdict from math import gcd def _snake_case ( UpperCamelCase : int = 1500000 ): UpperCAmelCase : defaultdict = defaultdict(UpperCamelCase ) UpperCAmelCase : Union[str, Any] = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , UpperCamelCase , 2 ): if gcd(UpperCamelCase , UpperCamelCase ) > 1: continue UpperCAmelCase : str = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(UpperCamelCase , limit + 1 , UpperCamelCase ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f"""{solution() = }""")
109
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def _snake_case ( *UpperCamelCase : str , UpperCamelCase : Optional[Union[Dict, Any]] = None , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[int]=2 ): from .. import __version__ UpperCAmelCase : Tuple = take_from UpperCAmelCase : Optional[Any] = () if not isinstance(args[0] , UpperCamelCase ): UpperCAmelCase : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(UpperCamelCase ).base_version ) >= version.parse(UpperCamelCase ): raise ValueError( F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" F" version {__version__} is >= {version_name}" ) UpperCAmelCase : Optional[int] = None if isinstance(UpperCamelCase , UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(UpperCamelCase ),) UpperCAmelCase : List[str] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(UpperCamelCase , UpperCamelCase ): values += (getattr(UpperCamelCase , UpperCamelCase ),) UpperCAmelCase : List[Any] = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: UpperCAmelCase : int = F"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: UpperCAmelCase : Optional[Any] = warning + """ """ if standard_warn else """""" warnings.warn(warning + message , UpperCamelCase , stacklevel=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) > 0: UpperCAmelCase : Optional[int] = inspect.getouterframes(inspect.currentframe() )[1] UpperCAmelCase : Union[str, Any] = call_frame.filename UpperCAmelCase : List[Any] = call_frame.lineno UpperCAmelCase : List[str] = call_frame.function UpperCAmelCase , UpperCAmelCase : Optional[int] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(UpperCamelCase ) == 0: return elif len(UpperCamelCase ) == 1: return values[0] return values
109
1
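# Hedged sketch of how the deprecation helper above is used (the dump renames it;
# upstream diffusers exposes it as `deprecate`). Each positional triple is
# (name, removal_version, message); take_from pops the deprecated kwarg and the
# helper returns its value after warning. The version string and message below
# are hypothetical:
kwargs = {"resolution": 512}
resolution = deprecate(
    "resolution", "99.0.0", "Pass `size` instead of `resolution`.", take_from=kwargs
)  # warns, removes "resolution" from kwargs, and returns 512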
"""simple docstring""" import math class _lowercase : """simple docstring""" def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : list[int] ) -> int: '''simple docstring''' __UpperCamelCase =0.0 __UpperCamelCase =0.0 for i in range(len(UpperCamelCase__ ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def UpperCAmelCase_ ( self : str , UpperCamelCase__ : list[list[int | float]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : float ) -> list[list[int | float]]: '''simple docstring''' for i in range(len(UpperCamelCase__ ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def lowerCAmelCase (): """simple docstring""" __UpperCamelCase =[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) __UpperCamelCase =[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training __UpperCamelCase =SelfOrganizingMap() __UpperCamelCase =3 __UpperCamelCase =0.5 for _ in range(__UpperCamelCase ): for j in range(len(__UpperCamelCase ) ): # training sample __UpperCamelCase =training_samples[j] # Compute the winning vector __UpperCamelCase =self_organizing_map.get_winner(__UpperCamelCase , __UpperCamelCase ) # Update the winning vector __UpperCamelCase =self_organizing_map.update(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # classify test sample __UpperCamelCase =[0, 0, 0, 1] __UpperCamelCase =self_organizing_map.get_winner(__UpperCamelCase , __UpperCamelCase ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
85
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase (__UpperCamelCase : dict , __UpperCamelCase : str , __UpperCamelCase : set , __UpperCamelCase : set , __UpperCamelCase : dict , __UpperCamelCase : dict , __UpperCamelCase : PriorityQueue , __UpperCamelCase : dict , __UpperCamelCase : float | int , ): """simple docstring""" for nxt, d in graph[v]: if nxt in visited_forward: continue __UpperCamelCase =cst_fwd.get(__UpperCamelCase , np.inf ) __UpperCamelCase =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) __UpperCamelCase =new_cost_f __UpperCamelCase =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __UpperCamelCase =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : dict , __UpperCamelCase : dict ): """simple docstring""" __UpperCamelCase =-1 __UpperCamelCase =set() __UpperCamelCase =set() __UpperCamelCase ={source: 0} __UpperCamelCase ={destination: 0} __UpperCamelCase ={source: None} __UpperCamelCase ={destination: None} __UpperCamelCase =PriorityQueue() __UpperCamelCase =PriorityQueue() __UpperCamelCase =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __UpperCamelCase , __UpperCamelCase =queue_forward.get() visited_forward.add(__UpperCamelCase ) __UpperCamelCase , __UpperCamelCase =queue_backward.get() visited_backward.add(__UpperCamelCase ) __UpperCamelCase =pass_and_relaxation( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) __UpperCamelCase =pass_and_relaxation( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __UpperCamelCase =shortest_distance return shortest_path_distance __lowercase = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } __lowercase = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
85
1
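# Illustrative call for the bidirectional Dijkstra above, reusing the forward and
# backward graphs defined at the bottom of that snippet (the upstream names
# bidirectional_dij, graph_fwd and graph_bwd are assumed; the dump obfuscates them):
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3, via E -> G -> F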
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
48
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power in watts: P = S * cos(phi)."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power in VAR: Q = S * sin(phi) = S * sqrt(1 - cos^2(phi))."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
1
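A worked example for the two power functions above (a sketch, assuming the definitions are in scope): a 100 VA load at power factor 0.9 draws 90 W of real power and 100 * sqrt(1 - 0.81) ≈ 43.589 VAR of reactive power, and the two satisfy the power-triangle identity P^2 + Q^2 = S^2.

import math

s, pf = 100.0, 0.9
p = real_power(s, pf)      # 90.0
q = reactive_power(s, pf)  # ~43.589
assert math.isclose(p**2 + q**2, s**2)  # 8100 + 1900 == 10000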
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
224
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    # The three headline counters share this class on the page.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
224
1
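One small variation worth knowing for the DreamBooth inference snippet above: diffusers pipelines accept a torch.Generator, so pinning a seed makes repeated runs produce the same image (a sketch; the model path is the same placeholder as above).

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "path-to-your-trained-model", torch_dtype=torch.float16
).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)  # fixed seed for reproducibility
image = pipe(
    "A photo of sks dog in a bucket",
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("dog-bucket.png")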
"""simple docstring""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=2 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=36 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=6 , _UpperCAmelCase=6 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=1000 , ): lowercase__: Any = parent lowercase__: Any = batch_size lowercase__: Optional[Any] = num_channels lowercase__: str = image_size lowercase__: List[str] = patch_size lowercase__: Union[str, Any] = text_seq_length lowercase__: Dict = is_training lowercase__: Union[str, Any] = use_input_mask lowercase__: Optional[int] = use_token_type_ids lowercase__: int = use_labels lowercase__: Optional[int] = vocab_size lowercase__: Optional[int] = hidden_size lowercase__: Any = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: Optional[int] = intermediate_size lowercase__: int = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: str = attention_probs_dropout_prob lowercase__: str = max_position_embeddings lowercase__: List[str] = type_vocab_size lowercase__: Dict = type_sequence_label_size lowercase__: Optional[int] = initializer_range lowercase__: int = coordinate_size lowercase__: int = shape_size lowercase__: Tuple = num_labels lowercase__: Dict = num_choices lowercase__: Tuple = scope lowercase__: Tuple = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowercase__: Optional[int] = text_seq_length lowercase__: int = (image_size // patch_size) ** 2 + 1 lowercase__: Optional[int] = self.text_seq_length + self.image_seq_length def _snake_case ( self ): lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase__: Any = bbox[i, j, 3] lowercase__: Any = bbox[i, j, 1] lowercase__: Dict = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase__: str = bbox[i, 
j, 2] lowercase__: Union[str, Any] = bbox[i, j, 0] lowercase__: List[str] = t lowercase__: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__: Optional[int] = None if self.use_input_mask: lowercase__: Any = random_attention_mask([self.batch_size, self.text_seq_length] ) lowercase__: int = None if self.use_token_type_ids: lowercase__: Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowercase__: int = None lowercase__: Any = None if self.use_labels: lowercase__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowercase__: List[Any] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = LayoutLMvaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() # text + image lowercase__: int = model(__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE ) lowercase__: int = model( __SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) lowercase__: List[str] = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) lowercase__: List[Any] = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowercase__: Optional[int] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowercase__: List[str] = model(pixel_values=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = self.num_labels lowercase__: str = LayoutLMvaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowercase__: Optional[Any] = model( __SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Union[str, Any] = self.num_labels lowercase__: Optional[int] = LayoutLMvaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowercase__: Tuple = model( __SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = LayoutLMvaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowercase__: int = model( __SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self ): lowercase__: Dict = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ): Any = config_and_inputs lowercase__: Optional[int] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Any = False _UpperCAmelCase :Optional[int] = False _UpperCAmelCase :Dict = False _UpperCAmelCase :List[Any] = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :int = ( {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel} if is_torch_available() else {} ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return True def _snake_case ( self ): lowercase__: Optional[int] = LayoutLMvaModelTester(self ) lowercase__: Optional[int] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ): lowercase__: Optional[Any] = copy.deepcopy(__SCREAMING_SNAKE_CASE ) if model_class in get_values(__SCREAMING_SNAKE_CASE ): lowercase__: Dict = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__SCREAMING_SNAKE_CASE ): lowercase__: str = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) elif model_class in get_values(__SCREAMING_SNAKE_CASE ): lowercase__: List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) 
lowercase__: Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) elif model_class in [ *get_values(__SCREAMING_SNAKE_CASE ), ]: lowercase__: Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) elif model_class in [ *get_values(__SCREAMING_SNAKE_CASE ), ]: lowercase__: Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE , ) return inputs_dict def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__: List[str] = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self ): for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: Tuple = LayoutLMvaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__ ( ) -> Any: lowercase__: Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @cached_property def _snake_case ( self ): return LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE ) if is_vision_available() else None @slow def _snake_case ( self ): lowercase__: Optional[Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(__SCREAMING_SNAKE_CASE ) lowercase__: int = self.default_image_processor lowercase__: Tuple = prepare_img() lowercase__: str = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE ) lowercase__: Union[str, Any] = torch.tensor([[1, 2]] ) lowercase__: Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass lowercase__: str = model( input_ids=input_ids.to(__SCREAMING_SNAKE_CASE ) , bbox=bbox.to(__SCREAMING_SNAKE_CASE ) , pixel_values=pixel_values.to(__SCREAMING_SNAKE_CASE ) , ) # verify the logits lowercase__: List[str] = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE ) lowercase__: Tuple = torch.tensor( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
177
"""Lazy import structure for the SEW model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
267
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
367
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = ["image_processor", "tokenizer"] a__ = "BridgeTowerImageProcessor" a__ = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ) -> Optional[int]: super().__init__(__lowerCamelCase , __lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : Dict , ) -> BatchEncoding: A : List[Any] = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) # add pixel_values + pixel_mask A : List[Any] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , do_normalize=__lowerCamelCase , do_center_crop=__lowerCamelCase , **__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def SCREAMING_SNAKE_CASE__ ( self : int , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ) -> List[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : str ) -> Any: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A : Dict = self.tokenizer.model_input_names A : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
256
0
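Both the SEW and Funnel __init__ modules above rely on transformers' _LazyModule to defer heavy submodule imports until first use. A minimal standalone sketch of that idea (a hypothetical simplified version, not the transformers implementation itself):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once per name
        return value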
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class __lowerCAmelCase ( lowerCamelCase__ ): __lowerCamelCase = '''pix2struct_text_model''' __lowerCamelCase = ['''past_key_values'''] __lowerCamelCase = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , _snake_case=50244 , _snake_case=768 , _snake_case=64 , _snake_case=2048 , _snake_case=12 , _snake_case=12 , _snake_case=32 , _snake_case=128 , _snake_case=0.1 , _snake_case=1e-6 , _snake_case=1.0 , _snake_case="gelu_new" , _snake_case=0 , _snake_case=False , _snake_case=0 , _snake_case=1 , _snake_case=False , _snake_case=True , **_snake_case , ): """simple docstring""" _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = d_kv _lowerCAmelCase = d_ff _lowerCAmelCase = num_layers _lowerCAmelCase = num_heads _lowerCAmelCase = relative_attention_num_buckets _lowerCAmelCase = relative_attention_max_distance _lowerCAmelCase = dropout_rate _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_factor _lowerCAmelCase = use_cache _lowerCAmelCase = eos_token_id _lowerCAmelCase = decoder_start_token_id # for backwards compatibility _lowerCAmelCase = dense_act_fn super().__init__( pad_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , tie_word_embeddings=_snake_case , is_decoder=_snake_case , **_snake_case , ) @classmethod def snake_case ( cls , _snake_case , **_snake_case ): """simple docstring""" cls._set_token_in_kwargs(_snake_case ) _lowerCAmelCase , _lowerCAmelCase = cls.get_config_dict(_snake_case , **_snake_case ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": _lowerCAmelCase = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_snake_case , **_snake_case ) class __lowerCAmelCase ( lowerCamelCase__ ): __lowerCamelCase = '''pix2struct_vision_model''' def __init__( self , _snake_case=768 , _snake_case=768 , _snake_case=2048 , _snake_case=64 , _snake_case=12 , _snake_case=12 , _snake_case="gelu_new" , _snake_case=1e-6 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=1e-10 , _snake_case=1.0 , _snake_case=4096 , _snake_case=32 , _snake_case=128 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) _lowerCAmelCase = hidden_size _lowerCAmelCase = patch_embed_hidden_size _lowerCAmelCase = d_ff _lowerCAmelCase = dropout_rate _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = initializer_range _lowerCAmelCase = initializer_factor _lowerCAmelCase = attention_dropout _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = dense_act_fn _lowerCAmelCase = seq_len _lowerCAmelCase = relative_attention_num_buckets _lowerCAmelCase = relative_attention_max_distance _lowerCAmelCase = d_kv @classmethod def snake_case ( cls , _snake_case , **_snake_case ): """simple docstring""" cls._set_token_in_kwargs(_snake_case ) _lowerCAmelCase , _lowerCAmelCase = cls.get_config_dict(_snake_case , **_snake_case ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": _lowerCAmelCase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_snake_case , **_snake_case ) class __lowerCAmelCase ( lowerCamelCase__ ): __lowerCamelCase = '''pix2struct''' __lowerCamelCase = True def __init__( self , _snake_case=None , _snake_case=None , _snake_case=1.0 , _snake_case=0.02 , _snake_case=False , _snake_case=False , _snake_case=True , **_snake_case , ): """simple docstring""" super().__init__(tie_word_embeddings=_snake_case , is_encoder_decoder=_snake_case , **_snake_case ) if text_config is None: _lowerCAmelCase = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: _lowerCAmelCase = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) _lowerCAmelCase = PixaStructTextConfig(**_snake_case ) _lowerCAmelCase = PixaStructVisionConfig(**_snake_case ) _lowerCAmelCase = self.text_config.decoder_start_token_id _lowerCAmelCase = self.text_config.pad_token_id _lowerCAmelCase = self.text_config.eos_token_id _lowerCAmelCase = initializer_factor _lowerCAmelCase = initializer_range _lowerCAmelCase = self.initializer_range _lowerCAmelCase = self.initializer_range _lowerCAmelCase = is_vqa @classmethod def snake_case ( cls , _snake_case , _snake_case , **_snake_case ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = copy.deepcopy(self.__dict__ ) _lowerCAmelCase = self.text_config.to_dict() _lowerCAmelCase = self.vision_config.to_dict() _lowerCAmelCase = self.__class__.model_type return output
82
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if principal <= 0: raise Exception("Principal borrowed must be > 0" ) if rate_per_annum < 0: raise Exception("Rate of interest must be >= 0" ) if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise Exception("Years to repay must be an integer > 0" ) # Yearly rate is divided by 12 to get monthly rate lowerCAmelCase : Tuple = rate_per_annum / 1_2 # Years to repay is multiplied by 12 to get number of payments as payment is monthly lowerCAmelCase : List[Any] = years_to_repay * 1_2 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
108
0
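To make the amortization formula in the EMI function above concrete: borrowing 100,000 at 10% per annum over 2 years gives a monthly rate of 0.10 / 12 and 24 payments, for an installment of roughly 4614.49 (a sketch, assuming the function above is in scope):

# p = 100000, annual rate 10%, 2 years -> r = 0.008333, n = 24
emi = equated_monthly_installments(100000, 0.10, 2)
print(round(emi, 2))  # ~4614.49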
def UpperCAmelCase_ (_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ): global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: __UpperCamelCase : str = mf_knapsack(i - 1 , snake_case_ , snake_case_ , snake_case_ ) else: __UpperCamelCase : Union[str, Any] = max( mf_knapsack(i - 1 , snake_case_ , snake_case_ , snake_case_ ) , mf_knapsack(i - 1 , snake_case_ , snake_case_ , j - wt[i - 1] ) + val[i - 1] , ) __UpperCamelCase : List[str] = val return f[i][j] def UpperCAmelCase_ (_lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ): __UpperCamelCase : Optional[Any] = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: __UpperCamelCase : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: __UpperCamelCase : Dict = dp[i - 1][w_] return dp[n][w_], dp def UpperCAmelCase_ (_lowerCAmelCase : int , _lowerCAmelCase : list , _lowerCAmelCase : list ): if not (isinstance(snake_case_ , (list, tuple) ) and isinstance(snake_case_ , (list, tuple) )): raise ValueError( "Both the weights and values vectors must be either lists or tuples" ) __UpperCamelCase : int = len(snake_case_ ) if num_items != len(snake_case_ ): __UpperCamelCase : Any = ( """The number of weights must be the same as the number of values.\n""" F'''But got {num_items} weights and {len(snake_case_ )} values''' ) raise ValueError(snake_case_ ) for i in range(snake_case_ ): if not isinstance(wt[i] , snake_case_ ): __UpperCamelCase : List[Any] = ( """All weights must be integers but got weight of """ F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(snake_case_ ) __UpperCamelCase : Union[str, Any] = knapsack(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCamelCase : set = set() _construct_solution(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) return optimal_val, example_optional_set def UpperCAmelCase_ (_lowerCAmelCase : list , _lowerCAmelCase : list , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : set ): # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(snake_case_ , snake_case_ , i - 1 , snake_case_ , snake_case_ ) else: optimal_set.add(snake_case_ ) _construct_solution(snake_case_ , snake_case_ , i - 1 , j - wt[i - 1] , snake_case_ ) if __name__ == "__main__": lowercase : str = [3, 2, 4, 4] lowercase : Optional[int] = [4, 3, 2, 3] lowercase : Any = 4 lowercase : List[str] = 6 lowercase : Dict = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowercase : Any = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowercase : List[str] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("optimal_value = ", optimal_solution) print("An optimal subset corresponding to the optimal value", optimal_subset)
355
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase : Any = logging.get_logger(__name__) lowercase : Dict = {"vocab_file": "spm_char.model"} lowercase : Tuple = { "vocab_file": { "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model", "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model", "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model", } } lowercase : Union[str, Any] = { "microsoft/speecht5_asr": 1024, "microsoft/speecht5_tts": 1024, "microsoft/speecht5_vc": 1024, } class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ): """simple docstring""" lowercase : Dict = VOCAB_FILES_NAMES lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None: '''simple docstring''' __UpperCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) __UpperCamelCase : List[Any] = vocab_file __UpperCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCamelCase ) @property def __lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def __lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: '''simple docstring''' __UpperCamelCase : Any = self.__dict__.copy() __UpperCamelCase : Union[str, Any] = None return state def __setstate__( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' __UpperCamelCase : Dict = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCamelCase : List[Any] = {} __UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase ) -> str: '''simple docstring''' return self.sp_model.piece_to_id(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase ) -> int: '''simple docstring''' __UpperCamelCase : Optional[Any] = self.sp_model.IdToPiece(__UpperCamelCase ) return token def __lowerCamelCase ( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase : Optional[Any] = [] __UpperCamelCase : Any = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__UpperCamelCase ) + token __UpperCamelCase : Any = [] else: current_sub_tokens.append(__UpperCamelCase ) out_string += self.sp_model.decode(__UpperCamelCase ) return out_string.strip() def 
__lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) __UpperCamelCase : str = [1] if token_ids_a is None: return ([0] * len(__UpperCamelCase )) + suffix_ones return ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCamelCase : Optional[Any] = os.path.join( __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , "wb" ) as fi: __UpperCamelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
171
0
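The SpeechT5 tokenizer above is a thin character-level wrapper around a SentencePiece model. A hedged usage sketch via the released checkpoint (assumes transformers with sentencepiece installed and access to the microsoft/speecht5_tts checkpoint):

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tokenizer("Hello world").input_ids
# Character-level pieces plus the trailing </s> added by build_inputs_with_special_tokens.
print(tokenizer.decode(ids))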
"""Convert a pretrained UnCLIP text-to-image pipeline into an image-variation pipeline."""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
85
"""Convert a pretrained UnCLIP text-to-image pipeline into an image-variation pipeline."""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
85
1
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __A = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a , __a=False , ) -> Optional[int]: """simple docstring""" output_path.parent.mkdir(parents=lowercase__ , exist_ok=lowercase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , use_external_data_format=lowercase__ , enable_onnx_checker=lowercase__ , opset_version=lowercase__ , ) else: export( lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , opset_version=lowercase__ , ) @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> Any: """simple docstring""" lowerCamelCase__: Any =torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowerCamelCase__: Union[str, Any] ="""cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: lowerCamelCase__: Any ="""cpu""" lowerCamelCase__: Any =StableDiffusionPipeline.from_pretrained(lowercase__ , torch_dtype=lowercase__ ).to(lowercase__ ) lowerCamelCase__: int =Path(lowercase__ ) # TEXT ENCODER lowerCamelCase__: int =pipeline.text_encoder.config.max_position_embeddings lowerCamelCase__: int =pipeline.text_encoder.config.hidden_size lowerCamelCase__: List[Any] =pipeline.tokenizer( "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="pt" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowercase__ , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, } , opset=lowercase__ , ) del pipeline.text_encoder # UNET lowerCamelCase__: Optional[Any] =pipeline.unet.config.in_channels lowerCamelCase__: Union[str, Any] =pipeline.unet.config.sample_size lowerCamelCase__: List[Any] =output_path / """unet""" / """model.onnx""" onnx_export( pipeline.unet , model_args=( torch.randn(2 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(2 ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(2 , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=lowercase__ , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, } , opset=lowercase__ , use_external_data_format=lowercase__ , ) lowerCamelCase__: Union[str, Any] =str(unet_path.absolute().as_posix() ) lowerCamelCase__: int =os.path.dirname(lowercase__ ) lowerCamelCase__: Tuple 
=onnx.load(lowercase__ ) # clean up existing tensor files shutil.rmtree(lowercase__ ) os.mkdir(lowercase__ ) # collate external tensor files into one onnx.save_model( lowercase__ , lowercase__ , save_as_external_data=lowercase__ , all_tensors_to_one_file=lowercase__ , location="weights.pb" , convert_attribute=lowercase__ , ) del pipeline.unet # VAE ENCODER lowerCamelCase__: int =pipeline.vae lowerCamelCase__: Tuple =vae_encoder.config.in_channels lowerCamelCase__: List[str] =vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowerCamelCase__: List[str] =lambda __a , __a : vae_encoder.encode(lowercase__ , lowercase__ )[0].sample() onnx_export( lowercase__ , model_args=( torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=lowercase__ , ) # VAE DECODER lowerCamelCase__: int =pipeline.vae lowerCamelCase__: int =vae_decoder.config.latent_channels lowerCamelCase__: Dict =vae_decoder.config.out_channels # forward only through the decoder part lowerCamelCase__: Union[str, Any] =vae_encoder.decode onnx_export( lowercase__ , model_args=( torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=lowercase__ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowerCamelCase__: Union[str, Any] =pipeline.safety_checker lowerCamelCase__: Tuple =safety_checker.config.vision_config.num_channels lowerCamelCase__: Union[str, Any] =safety_checker.config.vision_config.image_size lowerCamelCase__: List[Any] =safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , lowercase__ , lowercase__ , lowercase__ , ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, } , opset=lowercase__ , ) del pipeline.safety_checker lowerCamelCase__: Dict =OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" ) lowerCamelCase__: Optional[int] =pipeline.feature_extractor else: lowerCamelCase__: Tuple =None lowerCamelCase__: Tuple =None lowerCamelCase__: str =OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=lowercase__ , feature_extractor=lowercase__ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(lowercase__ ) print("ONNX pipeline saved to" , lowercase__ ) del pipeline 
del onnx_pipeline lowerCamelCase__: int =OnnxStableDiffusionPipeline.from_pretrained(lowercase__ , provider="CPUExecutionProvider" ) print("ONNX pipeline is loadable" ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") __A = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
356
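The ONNX conversion script above is normally driven through its argparse CLI; an equivalent direct call might look like this (a sketch; the function name follows the script's own __main__ call, and the Hub id and output directory are assumptions):

# Positional order follows the __main__ block: model_path, output_path, opset, fp16.
convert_models("runwayml/stable-diffusion-v1-5", "./sd-onnx", 14, False)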
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") __A = logging.getLogger(__name__) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) lowercase_ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "The input training data file (a text file)."} ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached training and evaluation sets"} ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "The number of processes to use for the preprocessing."} , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowercase_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]: '''simple docstring''' if self.train_file is not None: lowerCamelCase__: List[Any] =self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: lowerCamelCase__: List[Any] =self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = True lowercase_ = None lowercase_ = None def __call__(self : Any , UpperCAmelCase_ : Dict) ->List[Any]: '''simple docstring''' lowerCamelCase__: str ="label" if "label" in features[0].keys() else "labels" lowerCamelCase__: Union[str, Any] =[feature.pop(UpperCAmelCase_) for feature in features] lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_) lowerCamelCase__: int =len(features[0]["input_ids"]) lowerCamelCase__: List[Any] =[ [{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase_)] for feature in features ] lowerCamelCase__: Dict =list(chain(*UpperCAmelCase_)) lowerCamelCase__: Tuple =self.tokenizer.pad( UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten lowerCamelCase__: Optional[Any] ={k: v.view(UpperCAmelCase_ , UpperCAmelCase_ , -1) for k, v in batch.items()} # Add back labels lowerCamelCase__: Optional[Any] =torch.tensor(UpperCAmelCase_ , dtype=torch.intaa) return batch def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__: str =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , __a , __a ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowerCamelCase__: Dict =training_args.get_process_log_level() logger.setLevel(__a ) datasets.utils.logging.set_verbosity(__a ) transformers.utils.logging.set_verbosity(__a ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
lowerCamelCase__: List[str] =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase__: Any =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: lowerCamelCase__: List[Any] ={} if data_args.train_file is not None: lowerCamelCase__: List[str] =data_args.train_file if data_args.validation_file is not None: lowerCamelCase__: Optional[Any] =data_args.validation_file lowerCamelCase__: List[Any] =data_args.train_file.split("." )[-1] lowerCamelCase__: int =load_dataset( __a , data_files=__a , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. lowerCamelCase__: List[Any] =load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase__: int =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__: List[str] =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__: Dict =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
lowerCamelCase__: Optional[int] =[F"""ending{i}""" for i in range(4 )] lowerCamelCase__: List[str] ="sent1" lowerCamelCase__: List[str] ="sent2" if data_args.max_seq_length is None: lowerCamelCase__: Optional[int] =tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) lowerCamelCase__: Optional[int] =1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) lowerCamelCase__: Any =min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(__a ): lowerCamelCase__: Tuple =[[context] * 4 for context in examples[context_name]] lowerCamelCase__: List[Any] =examples[question_header_name] lowerCamelCase__: Dict =[ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__a ) ] # Flatten out lowerCamelCase__: str =list(chain(*__a ) ) lowerCamelCase__: str =list(chain(*__a ) ) # Tokenize lowerCamelCase__: List[Any] =tokenizer( __a , __a , truncation=__a , max_length=__a , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__a ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) lowerCamelCase__: List[Any] =raw_datasets["train"] if data_args.max_train_samples is not None: lowerCamelCase__: Dict =min(len(__a ) , data_args.max_train_samples ) lowerCamelCase__: Any =train_dataset.select(range(__a ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): lowerCamelCase__: Optional[Any] =train_dataset.map( __a , batched=__a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) lowerCamelCase__: str =raw_datasets["validation"] if data_args.max_eval_samples is not None: lowerCamelCase__: Any =min(len(__a ) , data_args.max_eval_samples ) lowerCamelCase__: List[Any] =eval_dataset.select(range(__a ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): lowerCamelCase__: Tuple =eval_dataset.map( __a , batched=__a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator lowerCamelCase__: Any =( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__a , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(__a ): lowerCamelCase__ , lowerCamelCase__: List[str] =eval_predictions lowerCamelCase__: Optional[int] =np.argmax(__a , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer lowerCamelCase__: Dict =Trainer( model=__a , args=__a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__a , data_collator=__a , compute_metrics=__a , ) # 
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
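# --- Illustrative aside (not part of the original script) ---
# A rough, self-contained sketch of the flatten / pad / un-flatten pattern that
# the `DataCollatorForMultipleChoice` above relies on. The function name and
# signature here are hypothetical; in the script this logic lives in the
# collator's __call__ and uses the real `tokenizer.pad()`.
import torch


def collate_multiple_choice(features, tokenizer, num_choices=4):
    # Pop the labels first so they are not flattened with the encodings.
    labels = [feature.pop("label") for feature in features]
    batch_size = len(features)
    # Flatten: one row per (example, choice) pair.
    flattened = [
        {k: v[i] for k, v in feature.items()}
        for feature in features
        for i in range(num_choices)
    ]
    batch = tokenizer.pad(flattened, padding=True, return_tensors="pt")
    # Un-flatten back to (batch_size, num_choices, seq_len) and re-attach labels.
    batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
    batch["labels"] = torch.tensor(labels, dtype=torch.int64)
    return batch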
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase__ : int = { """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""], """tokenization_deberta""": ["""DebertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = ["""DebertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Tuple = [ """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """DebertaForMaskedLM""", """DebertaForQuestionAnswering""", """DebertaForSequenceClassification""", """DebertaForTokenClassification""", """DebertaModel""", """DebertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : List[Any] = [ """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDebertaForMaskedLM""", """TFDebertaForQuestionAnswering""", """TFDebertaForSequenceClassification""", """TFDebertaForTokenClassification""", """TFDebertaModel""", """TFDebertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowercase__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.""" """It takes two arguments named `image` which should be the original image, and `label` which should be a text """ """describing the elements what should be identified in the segmentation mask. The tool returns the mask.""" ) _SCREAMING_SNAKE_CASE = """CIDAS/clipseg-rd64-refined""" _SCREAMING_SNAKE_CASE = """image_segmenter""" _SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation _SCREAMING_SNAKE_CASE = ["""image""", """text"""] _SCREAMING_SNAKE_CASE = ["""image"""] def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ): requires_backends(self , ['vision'] ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : "Image" , SCREAMING_SNAKE_CASE_ : str ): return self.pre_processor(text=[label] , images=[image] , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ): with torch.no_grad(): lowerCAmelCase_ : List[str] = self.model(**SCREAMING_SNAKE_CASE_ ).logits return logits def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): lowerCAmelCase_ : Dict = outputs.cpu().detach().numpy() lowerCAmelCase_ : Optional[Any] = 0 lowerCAmelCase_ : Optional[Any] = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
"""
Implementation of the sigmoid and SiLU (sigmoid linear unit) activation
functions with NumPy.
"""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise logistic sigmoid: 1 / (1 + exp(-x)).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    SiLU (a.k.a. swish with beta = 1): x * sigmoid(x).

    >>> sigmoid_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
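# --- Illustrative aside ---
# Quick numerical sanity check for the two activations (values approximate):
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))               # ~[0.2689, 0.5,  0.7311]
print(sigmoid_linear_unit(x))   # ~[-0.2689, 0.0, 0.7311]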
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCAmelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" return max(metric_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()] lowerCAmelCase = [] if args.gold_data_mode == "qa": lowerCAmelCase = pd.read_csv(_SCREAMING_SNAKE_CASE , sep="""\t""" , header=_SCREAMING_SNAKE_CASE ) for answer_list in data[1]: lowerCAmelCase = ast.literal_eval(_SCREAMING_SNAKE_CASE ) answers.append(_SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()] lowerCAmelCase = [[reference] for reference in references] lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = 0 for prediction, ground_truths in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = 100.0 * em / total lowerCAmelCase = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: """simple docstring""" lowerCAmelCase = args.k lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()] lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()] lowerCAmelCase = lowerCAmelCase = 0 for hypo, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase = set(hypo.split("""\t""" )[:k] ) lowerCAmelCase = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k lowerCAmelCase = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" def strip_title(_SCREAMING_SNAKE_CASE : Union[str, Any] ): if title.startswith("""\"""" ): lowerCAmelCase = title[1:] if title.endswith("""\"""" ): lowerCAmelCase = title[:-1] return title lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE , 
truncation=_SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) lowerCAmelCase = rag_model.rag.question_encoder(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = question_enc_outputs[0] lowerCAmelCase = rag_model.retriever( _SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) lowerCAmelCase = [] for docs in all_docs: lowerCAmelCase = [strip_title(_SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(_SCREAMING_SNAKE_CASE ) ) return provenance_strings def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple: """simple docstring""" with torch.no_grad(): lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = inputs_dict.input_ids.to(args.device ) lowerCAmelCase = inputs_dict.attention_mask.to(args.device ) lowerCAmelCase = rag_model.generate( # rag_model overwrites generate _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) return answers def _snake_case ( ) -> Dict: """simple docstring""" lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=_SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=_SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=_SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=_SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=_SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=_SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , 
default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=_SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=_SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=_SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=_SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=_SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=_SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) lowerCAmelCase = parser.parse_args() lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase = {} if args.model_type is None: lowerCAmelCase = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): lowerCAmelCase = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration lowerCAmelCase = args.n_docs if args.index_name is not None: lowerCAmelCase = args.index_name if args.index_path is not None: lowerCAmelCase = args.index_path else: lowerCAmelCase = BartForConditionalGeneration lowerCAmelCase = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = get_scores if args.eval_mode == """e2e""" else get_precision_at_k lowerCAmelCase = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(_SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if 
args.model_type.startswith("""rag""" ): lowerCAmelCase = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , retriever=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: lowerCAmelCase = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: lowerCAmelCase = [] for line in tqdm(_SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(_SCREAMING_SNAKE_CASE ) == args.eval_batch_size: lowerCAmelCase = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(_SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() lowerCAmelCase = [] if len(_SCREAMING_SNAKE_CASE ) > 0: lowerCAmelCase = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(_SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCAmelCase = get_args() main(args)
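# --- Illustrative aside (not part of the original script) ---
# Standalone sketch of the precision@k metric computed by get_precision_at_k
# above: hypothesis and reference provenance are tab-separated title lists, and
# each pair contributes |hypo[:k] ∩ ref| / k. The helper name is hypothetical.
def precision_at_k(hypos, references, k):
    total, score = 0, 0.0
    for hypo, reference in zip(hypos, references):
        hypo_titles = set(hypo.split("\t")[:k])
        ref_titles = set(reference.split("\t"))
        total += 1
        score += len(hypo_titles & ref_titles) / k
    return 100.0 * score / total


print(precision_at_k(["a\tb"], ["a\tc"], k=2))  # 50.0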
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def snake_case ( UpperCAmelCase )-> Dict: # picklable for multiprocessing """simple docstring""" return x.sum() def snake_case ( UpperCAmelCase )-> Any: # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class UpperCamelCase__ : UpperCAmelCase__ : List[Any] = 4_2 UpperCAmelCase__ : Dict = 4_2 class UpperCamelCase__ ( _lowercase): def lowercase_ ( self :List[Any] ) -> List[Any]: '''simple docstring''' __A = {} __A = [] __A = 1 __A = [1, 2] __A = {'a': 1, 'b': 2} __A = {'a': [1, 2], 'b': [3, 4]} __A = {'a': {'1': 1}, 'b': 2} __A = {'a': 1, 'b': 2, 'c': 3, 'd': 4} __A = {} __A = [] __A = 2 __A = [2, 3] __A = {'a': 2, 'b': 3} __A = {'a': [2, 3], 'b': [4, 5]} __A = {'a': {'1': 2}, 'b': 3} __A = {'a': 2, 'b': 3, 'c': 4, 'd': 5} self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) __A = 2 self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) __A = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )} __A = {'a': 2, 'b': 0, 'c': 2} __A = { 'a': np.eye(2 ).astype(__UpperCamelCase ), 'b': np.zeros(3 ).astype(__UpperCamelCase ), 'c': np.ones(2 ).astype(__UpperCamelCase ), } self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual( {k: v.tolist() for k, v in 
map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase , num_proc=__UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(__UpperCamelCase ): # can't pickle a local lambda map_nested(lambda _A : x + 1 , __UpperCamelCase , num_proc=__UpperCamelCase ) def lowercase_ ( self :Optional[Any] ) -> List[str]: '''simple docstring''' __A = {'a': 1, 'b': 2} __A = {'a': 3, 'b': 4} __A = {'a': 5, 'b': 6} __A = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) , __UpperCamelCase ) def lowercase_ ( self :str ) -> Tuple: '''simple docstring''' class UpperCamelCase__ : UpperCAmelCase__ : Tuple = 'bar' __A = Foo() self.assertEqual(foo.my_attr , 'bar' ) with temporary_assignment(__UpperCamelCase , 'my_attr' , 'BAR' ): self.assertEqual(foo.my_attr , 'BAR' ) self.assertEqual(foo.my_attr , 'bar' ) @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (1_6, 1_6, 1_6), (1_6, 1_7, 1_6), (1_7, 1_6, 1_6), ] , ) def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> List[str]: """simple docstring""" with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: __A = {f'{i}': i for i in range(a__ )} __A = map_nested(lambda UpperCAmelCase : x + 1_0 , a__ , num_proc=a__ , parallel_min_length=1_6 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class UpperCamelCase__ ( _lowercase): @require_tf def lowercase_ ( self :List[str] ) -> List[str]: '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers __A = layers.Dense(2 ) def gen_random_output(): __A = tf.random.uniform((1, 3) ) return model(__UpperCamelCase ).numpy() with temp_seed(42 , set_tensorflow=__UpperCamelCase ): __A = gen_random_output() with temp_seed(42 , set_tensorflow=__UpperCamelCase ): __A = gen_random_output() __A = gen_random_output() np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def lowercase_ ( self :List[Any] ) -> Tuple: '''simple docstring''' import torch def gen_random_output(): __A = torch.nn.Linear(3 , 2 ) __A = torch.rand(1 , 3 ) return model(__UpperCamelCase ).detach().numpy() with temp_seed(42 , set_pytorch=__UpperCamelCase ): __A = gen_random_output() with temp_seed(42 , set_pytorch=__UpperCamelCase ): __A = gen_random_output() __A = gen_random_output() np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def lowercase_ ( self :Tuple ) -> List[str]: '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): __A = gen_random_output() with temp_seed(42 ): __A = gen_random_output() __A = gen_random_output() np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('input_data' , [{}] ) def snake_case ( UpperCAmelCase )-> List[str]: """simple docstring""" __A = NestedDataStructure(a__ ).data assert output_data == input_data @pytest.mark.parametrize( 'data, 
expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), ([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def snake_case ( UpperCAmelCase , UpperCAmelCase )-> Tuple: """simple docstring""" __A = NestedDataStructure(a__ ).flatten() assert output == expected_output def snake_case ( )-> Optional[Any]: """simple docstring""" __A = A(x=1 , y='foobar' ) __A = {'x': 1, 'y': 'foobar'} assert asdict(a__ ) == expected_output __A = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]} __A = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]} assert asdict(a__ ) == expected_output with pytest.raises(a__ ): asdict([1, A(x=1_0 , y='foo' )] ) def snake_case ( UpperCAmelCase )-> Optional[Any]: """simple docstring""" return text.split() def snake_case ( UpperCAmelCase )-> Any: """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def snake_case ( )-> Any: """simple docstring""" with Pool(2 ) as pool: __A = list(iflatmap_unordered(a__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) ) assert out.count('hello' ) == 1_0 assert out.count('there' ) == 1_0 assert len(a__ ) == 2_0 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: __A = list(iflatmap_unordered(a__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) ) assert out.count('hello' ) == 1_0 assert out.count('there' ) == 1_0 assert len(a__ ) == 2_0 # check that we get items as fast as possible with Pool(2 ) as pool: __A = [] for yield_time, content in iflatmap_unordered( a__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(a__ ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(a__ ) == 4
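# --- Illustrative aside (not part of the original tests) ---
# A simplified stand-in for the map_nested semantics exercised above: apply a
# function to every leaf of a nested dict/list/tuple structure. The real
# datasets.utils.py_utils.map_nested also handles numpy arrays and optional
# multiprocessing; this sketch covers only the recursion.
def map_nested_simple(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_simple(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_simple(fn, v) for v in data)
    return fn(data)


assert map_nested_simple(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}) == {
    "a": [2, 3],
    "b": {"c": 4},
}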
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ ( _lowercase): def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : str=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=None , ) -> Tuple: _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _UpperCamelCase ( self : Optional[int] ) -> List[Any]: _UpperCamelCase = self.get_config() _UpperCamelCase = 300 return config def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] ) -> str: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ) -> List[str]: _UpperCamelCase = DebertaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0] _UpperCamelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0] _UpperCamelCase = model(__UpperCamelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ) -> Tuple: _UpperCamelCase = DebertaForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[Any]: _UpperCamelCase = self.num_labels _UpperCamelCase = DebertaForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__UpperCamelCase ) def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Dict: _UpperCamelCase = self.num_labels _UpperCamelCase = DebertaForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] 
) -> List[Any]: _UpperCamelCase = DebertaForQuestionAnswering(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase ( self : Any ) -> Union[str, Any]: _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase): snake_case__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = True snake_case__ = False snake_case__ = False snake_case__ = False snake_case__ = False def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple: _UpperCamelCase = DebertaModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def _UpperCamelCase ( self : Optional[int] ) -> int: self.config_tester.run_common_tests() def _UpperCamelCase ( self : Any ) -> List[str]: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__UpperCamelCase ) def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCamelCase ) def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCamelCase ) def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCamelCase ) def _UpperCamelCase ( self : Dict ) -> Tuple: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCamelCase ) @slow def _UpperCamelCase ( self : Any ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = DebertaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase): @unittest.skip(reason='''Model not available yet''' ) def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]: pass @slow def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase = DebertaModel.from_pretrained('''microsoft/deberta-base''' ) _UpperCamelCase = 
torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] # compare the actual values for a slice. _UpperCamelCase = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
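# --- Illustrative aside (not part of the original tests) ---
# The slow integration test above follows a common pattern: run a pinned
# checkpoint without gradients and compare a small slice of the output against
# hard-coded expected values. A hypothetical helper capturing that pattern:
import torch


def check_output_slice(model, input_ids, expected_slice, atol=1e-4):
    with torch.no_grad():
        output = model(input_ids)[0]
    assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=atol)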
"""simple docstring""" import operator as op a : Dict = """scaler.pt""" a : int = """pytorch_model""" a : Tuple = """random_states""" a : Dict = """optimizer""" a : List[str] = """scheduler""" a : Optional[Any] = """pytorch_model.bin""" a : Optional[int] = """pytorch_model.bin.index.json""" a : Any = """model.safetensors""" a : Tuple = """model.safetensors.index.json""" a : Optional[int] = """1.10.2""" a : Optional[int] = """py38""" a : Union[str, Any] = """4.17.0""" a : Any = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""] a : Dict = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""] a : Dict = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""] a : Tuple = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""] a : Union[str, Any] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""] a : List[str] = """2.0.1""" a : Dict = ["""pdsh""", """standard""", """openmpi""", """mvapich"""] a : Optional[int] = ["""default""", """reduce-overhead""", """max-autotune"""] a : Union[str, Any] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 a : str = [ """nnodes""", """nproc_per_node""", """rdzv_backend""", """rdzv_endpoint""", """rdzv_id""", """rdzv_conf""", """standalone""", """max_restarts""", """monitor_interval""", """start_method""", """role""", """module""", """m""", """no_python""", """run_path""", """log_dir""", """r""", """redirects""", """t""", """tee""", """node_rank""", """master_addr""", """master_port""", ] a : str = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""] a : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Union[str, Any] = { """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""", """PegasusXForConditionalGeneration""", """PegasusXModel""", """PegasusXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys a : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a plain-text message to a Slack incoming webhook."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
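# --- Illustrative aside (not part of the original script) ---
# Quick check of the webhook helper with a mocked HTTP layer; assumes the
# third-party requests-mock package and a placeholder webhook URL.
import requests_mock

with requests_mock.Mocker() as m:
    m.post("https://hooks.slack.com/services/TEST", status_code=200)
    send_slack_message("deploy finished", "https://hooks.slack.com/services/TEST")  # no error raised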
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2", "stage3"] , _lowerCamelCase=[1, 2, 3] , ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Optional[int] = image_size UpperCAmelCase__ : Tuple = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Dict = embed_dim UpperCAmelCase__ : List[Any] = depths UpperCAmelCase__ : Dict = num_heads UpperCAmelCase__ : Any = window_size UpperCAmelCase__ : str = mlp_ratio UpperCAmelCase__ : str = qkv_bias UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : Dict = drop_path_rate UpperCAmelCase__ : Union[str, Any] = hidden_act UpperCAmelCase__ : Union[str, Any] = use_absolute_embeddings UpperCAmelCase__ : Optional[int] = patch_norm UpperCAmelCase__ : Any = layer_norm_eps UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : Tuple = is_training UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Tuple = encoder_stride UpperCAmelCase__ : Optional[int] = out_features UpperCAmelCase__ : str = out_indices def _a (self ): """simple docstring""" UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Dict = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels def _a (self ): """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCAmelCase__ : Dict = model(_lowerCamelCase ) UpperCAmelCase__ : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase__ : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCAmelCase__ : Tuple = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_lowerCamelCase ): UpperCAmelCase__ : Union[str, Any] = ["""stem"""] UpperCAmelCase__ : List[Any] = MaskFormerSwinBackbone(config=_lowerCamelCase ) def _a (self ): """simple docstring""" UpperCAmelCase__ : str = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _a (self ): """simple docstring""" UpperCAmelCase__ : List[str] = MaskFormerSwinModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a (self ): """simple docstring""" return def _a (self ): """simple docstring""" UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) 
@unittest.skip("""Swin does not use inputs_embeds""" ) def _a (self ): """simple docstring""" pass @unittest.skip("""Swin does not support feedforward chunking""" ) def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase ) UpperCAmelCase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Any = [*signature.parameters.keys()] UpperCAmelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def _a (self ): """simple docstring""" pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def _a (self ): """simple docstring""" pass def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Dict = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) UpperCAmelCase__ : str = outputs.hidden_states UpperCAmelCase__ : str = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) # Swin has a different seq_length UpperCAmelCase__ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : Optional[Any] = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Tuple = 3 UpperCAmelCase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, 
self.model_tester.image_size) ) UpperCAmelCase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase__ : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : List[Any] = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def _a (self ): """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _a (self ): """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_lowerCamelCase ): UpperCAmelCase__ : Optional[int] = 0 return t def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ): with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ) UpperCAmelCase__ : str = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple() def recursive_check(_lowerCamelCase , _lowerCamelCase ): if isinstance(_lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ): recursive_check(_lowerCamelCase , _lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_lowerCamelCase , _lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1e-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" F""" {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. 
Dict has""" F""" `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}.""" ) , ) recursive_check(_lowerCamelCase , _lowerCamelCase ) for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} ) UpperCAmelCase__ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) UpperCAmelCase__ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} ) @require_torch class lowerCamelCase ( unittest.TestCase , lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = MaskFormerSwinModelTester(self ) def _a (self ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: UpperCAmelCase__ : Tuple = backbone_class(_lowerCamelCase ) backbone.to(_lowerCamelCase ) backbone.eval() UpperCAmelCase__ : int = backbone(**_lowerCamelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _lowerCamelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase ) self.assertIsNotNone(outputs.attentions )
from manim import * class __lowerCAmelCase ( UpperCamelCase__): def _lowercase ( self ) -> List[Any]: '''simple docstring''' a__ : int =Rectangle(height=0.5 , width=0.5 ) a__ : str =Rectangle(height=0.25 , width=0.25 ) a__ : Optional[int] =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) a__ : str =[mem.copy() for i in range(6 )] a__ : int =[mem.copy() for i in range(6 )] a__ : Any =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : Optional[int] =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : Optional[Any] =VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : str =Text("CPU" , font_size=2_4 ) a__ : int =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCAmelCase__ ) a__ : List[str] =[mem.copy() for i in range(4 )] a__ : List[Any] =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : int =Text("GPU" , font_size=2_4 ) a__ : Optional[int] =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCAmelCase__ ) a__ : Optional[int] =[mem.copy() for i in range(6 )] a__ : Optional[int] =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : Optional[Any] =Text("Model" , font_size=2_4 ) a__ : Optional[Any] =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCAmelCase__ ) a__ : Any =[] a__ : List[str] =[] a__ : List[str] =[] for i, rect in enumerate(lowerCAmelCase__ ): rect.set_stroke(lowerCAmelCase__ ) a__ : Dict =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 ) self.add(lowerCAmelCase__ ) model_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) a__ : Dict =[mem.copy() for i in range(6 )] a__ : Dict =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : Dict =Text("Loaded Checkpoint" , font_size=2_4 ) a__ : Dict =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCAmelCase__ ) a__ : Tuple =[] a__ : Tuple =[] for i, rect in enumerate(lowerCAmelCase__ ): a__ : Optional[Any] =fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 ) target.move_to(lowerCAmelCase__ ) ckpt_arr.append(lowerCAmelCase__ ) a__ : Tuple =target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ ) a__ : str =Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) a__ : Dict =MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCAmelCase__ , lowerCAmelCase__ ) a__ : List[str] =MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , ) blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , 
aligned_edge=key_text.get_left() ) self.add(lowerCAmelCase__ ) a__ : List[Any] =MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) a__ : List[str] =[meta_mem.copy() for i in range(6 )] a__ : Dict =[meta_mem.copy() for i in range(6 )] a__ : Optional[int] =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : Any =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : List[str] =VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) a__ : Union[str, Any] =Text("Disk" , font_size=2_4 ) a__ : Union[str, Any] =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) ) a__ : Optional[Any] =[] for i, rect in enumerate(lowerCAmelCase__ ): a__ : Dict =rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) ) self.play(*lowerCAmelCase__ ) self.play(FadeOut(lowerCAmelCase__ ) ) a__ : Dict =MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) ) self.play( FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , ) self.wait()
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : List[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) UpperCAmelCase : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __lowerCAmelCase : _lowercase : str = field( default=UpperCamelCase__ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(UpperCamelCase__)}) _lowercase : str = field( default=UpperCamelCase__ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""}) _lowercase : int = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _lowercase : int = field( default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) _lowercase : int = field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) _lowercase : int = field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) _lowercase : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""}) _lowercase : bool = field( default=UpperCamelCase__ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""}) _lowercase : float = field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""}) _lowercase : int = field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""}) _lowercase : int = field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) _lowercase : int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""}) class __lowerCAmelCase ( UpperCamelCase__): _lowercase : List[Any] = """train""" _lowercase : Any = """dev""" class __lowerCAmelCase ( UpperCamelCase__): _lowercase : SquadDataTrainingArguments _lowercase : List[SquadFeatures] _lowercase : Split _lowercase : bool def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = Split.train , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = "pt" , ) -> str: '''simple docstring''' a__ : List[Any] =args a__ : int =is_language_sensitive a__ : List[str] =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): try: a__ : str =Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) a__ : Any =mode # Load data 
features from cache or dataset file a__ : str ="v2" if args.version_2_with_negative else "v1" a__ : str =os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. a__ : Dict =cached_features_file + ".lock" with FileLock(lowerCAmelCase__ ): if os.path.exists(lowerCAmelCase__ ) and not args.overwrite_cache: a__ : Any =time.time() a__ : List[Any] =torch.load(lowerCAmelCase__ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. a__ : str =self.old_features["features"] a__ : str =self.old_features.get("dataset" , lowerCAmelCase__ ) a__ : Optional[int] =self.old_features.get("examples" , lowerCAmelCase__ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' " future run" ) else: if mode == Split.dev: a__ : Dict =self.processor.get_dev_examples(args.data_dir ) else: a__ : str =self.processor.get_train_examples(args.data_dir ) a__ , a__ : Union[str, Any] =squad_convert_examples_to_features( examples=self.examples , tokenizer=lowerCAmelCase__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowerCAmelCase__ , ) a__ : Any =time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowerCAmelCase__ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: '''simple docstring''' return len(self.features ) def __getitem__( self , lowerCAmelCase__ ) -> Dict[str, torch.Tensor]: '''simple docstring''' a__ : str =self.features[i] a__ : Optional[Any] =torch.tensor(feature.input_ids , dtype=torch.long ) a__ : List[Any] =torch.tensor(feature.attention_mask , dtype=torch.long ) a__ : int =torch.tensor(feature.token_type_ids , dtype=torch.long ) a__ : List[str] =torch.tensor(feature.cls_index , dtype=torch.long ) a__ : int =torch.tensor(feature.p_mask , dtype=torch.float ) a__ : Tuple =torch.tensor(feature.is_impossible , dtype=torch.float ) a__ : Tuple ={ "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: a__ : int =torch.tensor(feature.start_position , dtype=torch.long ) a__ : Any =torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) def UpperCamelCase_( lowerCamelCase_ ) -> List[List[ImageInput]]: if isinstance(lowerCamelCase_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCamelCase_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCamelCase_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class _lowerCamelCase( _a ): lowercase_ : Union[str, Any] = ["""pixel_values"""] def __init__( self, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = PILImageResampling.BILINEAR, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = 1 / 2_55, lowerCamelCase = True, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> None: """simple docstring""" super().__init__(**lowerCamelCase) _lowercase : str = size if size is not None else {'shortest_edge': 2_56} _lowercase : Any = get_size_dict(lowerCamelCase, default_to_square=lowerCamelCase) _lowercase : List[str] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} _lowercase : Optional[Any] = get_size_dict(lowerCamelCase, param_name='crop_size') _lowercase : Optional[int] = do_resize _lowercase : Tuple = size _lowercase : Any = do_center_crop _lowercase : Any = crop_size _lowercase : int = resample _lowercase : int = do_rescale _lowercase : str = rescale_factor _lowercase : Tuple = offset _lowercase : List[str] = do_normalize _lowercase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = PILImageResampling.BILINEAR, lowerCamelCase = None, **lowerCamelCase, ) -> np.ndarray: """simple docstring""" _lowercase : Union[str, Any] = get_size_dict(lowerCamelCase, default_to_square=lowerCamelCase) if "shortest_edge" in size: _lowercase : Dict = get_resize_output_image_size(lowerCamelCase, size['shortest_edge'], default_to_square=lowerCamelCase) elif "height" in size and "width" in size: _lowercase : List[Any] = (size['height'], size['width']) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''') return resize(lowerCamelCase, size=lowerCamelCase, resample=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> np.ndarray: """simple docstring""" _lowercase : Dict = get_size_dict(lowerCamelCase) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''') return center_crop(lowerCamelCase, size=(size['height'], size['width']), data_format=lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = True, lowerCamelCase = None, **lowerCamelCase, ) -> Dict: """simple docstring""" _lowercase : str = image.astype(np.floataa) if offset: _lowercase : List[str] = image - (scale / 2) return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> np.ndarray: """simple docstring""" return normalize(lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = ChannelDimension.FIRST, ) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') if offset and not do_rescale: raise ValueError('For offset, do_rescale must also be set to True.') # All transformations expect numpy arrays. 
_lowercase : Tuple = to_numpy_array(lowerCamelCase) if do_resize: _lowercase : Optional[Any] = self.resize(image=lowerCamelCase, size=lowerCamelCase, resample=lowerCamelCase) if do_center_crop: _lowercase : Dict = self.center_crop(lowerCamelCase, size=lowerCamelCase) if do_rescale: _lowercase : Dict = self.rescale(image=lowerCamelCase, scale=lowerCamelCase, offset=lowerCamelCase) if do_normalize: _lowercase : Optional[Any] = self.normalize(image=lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase) _lowercase : Tuple = to_channel_dimension_format(lowerCamelCase, lowerCamelCase) return image def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = ChannelDimension.FIRST, **lowerCamelCase, ) -> PIL.Image.Image: """simple docstring""" _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : Optional[int] = resample if resample is not None else self.resample _lowercase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : Optional[int] = offset if offset is not None else self.offset _lowercase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Any = image_mean if image_mean is not None else self.image_mean _lowercase : Union[str, Any] = image_std if image_std is not None else self.image_std _lowercase : Union[str, Any] = size if size is not None else self.size _lowercase : Any = get_size_dict(lowerCamelCase, default_to_square=lowerCamelCase) _lowercase : List[str] = crop_size if crop_size is not None else self.crop_size _lowercase : Any = get_size_dict(lowerCamelCase, param_name='crop_size') if not valid_images(lowerCamelCase): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') _lowercase : List[str] = make_batched(lowerCamelCase) _lowercase : Any = [ [ self._preprocess_image( image=lowerCamelCase, do_resize=lowerCamelCase, size=lowerCamelCase, resample=lowerCamelCase, do_center_crop=lowerCamelCase, crop_size=lowerCamelCase, do_rescale=lowerCamelCase, rescale_factor=lowerCamelCase, offset=lowerCamelCase, do_normalize=lowerCamelCase, image_mean=lowerCamelCase, image_std=lowerCamelCase, data_format=lowerCamelCase, ) for img in video ] for video in videos ] _lowercase : Dict = {'pixel_values': videos} return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase)
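The processor above follows the Vivit-style video preprocessing in transformers (resize, center-crop, offset rescale, normalize); a hedged usage sketch, assuming the class corresponds to `VivitImageProcessor`.

import numpy as np
from transformers import VivitImageProcessor  # assumed equivalent of the class defined above

processor = VivitImageProcessor()
# A hypothetical 8-frame clip of 360x640 RGB frames.
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 8, 3, 224, 224): batch, frames, channels, H, W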
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
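This matches `BigBirdConfig` as shipped in transformers; a minimal usage sketch with illustrative (non-default) sizes.

from transformers import BigBirdConfig, BigBirdModel

# A small block-sparse BigBird; every keyword maps to a field set above.
config = BigBirdConfig(
    hidden_size=256,
    num_hidden_layers=2,
    num_attention_heads=4,
    attention_type="block_sparse",
    block_size=64,
    num_random_blocks=3,
)
model = BigBirdModel(config)  # randomly initialized, not pretrained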
"""simple docstring""" __lowercase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __lowercase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __lowercase = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ): """simple docstring""" assert len(str(__UpperCamelCase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 1_2, "month should be between 1 to 12" assert 1 <= day <= 3_1, "day should be between 1 to 31" # Doomsday algorithm: __UpperCamelCase =year // 1_0_0 __UpperCamelCase =(5 * (century % 4) + 2) % 7 __UpperCamelCase =year % 1_0_0 __UpperCamelCase =centurian % 1_2 __UpperCamelCase =( (centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __UpperCamelCase =( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) == 0) else DOOMSDAY_LEAP[month - 1] ) __UpperCamelCase =(dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig __lowercase = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class _lowercase ( __a ): """simple docstring""" lowercase__ = '''albert''' def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=30000 , UpperCamelCase__ : int=128 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Any=16384 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : int=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=3 , **UpperCamelCase__ : List[str] , ) -> Dict: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __UpperCamelCase =vocab_size __UpperCamelCase =embedding_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_hidden_groups __UpperCamelCase =num_attention_heads __UpperCamelCase =inner_group_num __UpperCamelCase =hidden_act __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =type_vocab_size __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =classifier_dropout_prob __UpperCamelCase =position_embedding_type class _lowercase ( __a ): """simple docstring""" @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCamelCase ={0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase : int ="bert-base-cased" lowerCamelCase : Union[str, Any] ="google/pegasus-xsum" lowerCamelCase : Any =[" Sam ate lunch today.", "Sams lunch ingredients."] lowerCamelCase : Dict =["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] lowerCamelCase : List[str] ="patrickvonplaten/t5-tiny-random" lowerCamelCase : str ="sshleifer/bart-tiny-random" lowerCamelCase : Union[str, Any] ="sshleifer/tiny-mbart" lowerCamelCase : Optional[int] ="sshleifer/tiny-marian-en-de" def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int: UpperCamelCase__ : Optional[Any] = "\n".join(_A ) Path(_A ).open("w" ).writelines(_A ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any: for split in ["train", "val", "test"]: _dump_articles(os.path.join(_A , f'{split}.source' ) , _A ) _dump_articles(os.path.join(_A , f'{split}.target' ) , _A ) return tmp_dir class __a ( UpperCAmelCase__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' UpperCamelCase__ : Any = AutoTokenizer.from_pretrained(__lowercase ) UpperCamelCase__ : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCamelCase__ : int = max(len(tokenizer.encode(__lowercase ) ) for a in ARTICLES ) UpperCamelCase__ : Union[str, Any] = max(len(tokenizer.encode(__lowercase ) ) for a in SUMMARIES ) UpperCamelCase__ : Any = 4 UpperCamelCase__ : Optional[int] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. UpperCamelCase__ : Optional[int] = SeqaSeqDataset( __lowercase , data_dir=__lowercase , type_path="train" , max_source_length=__lowercase , max_target_length=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , ) UpperCamelCase__ : str = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(__lowercase , __lowercase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place UpperCamelCase__ : Optional[int] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(__lowercase ) UpperCamelCase__ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCamelCase__ : List[str] = max(len(tokenizer.encode(__lowercase ) ) for a in ARTICLES ) UpperCamelCase__ : Tuple = max(len(tokenizer.encode(__lowercase ) ) for a in SUMMARIES ) UpperCamelCase__ : Optional[int] = 4 UpperCamelCase__ : str = LegacySeqaSeqDataset( __lowercase , data_dir=__lowercase , type_path="train" , max_source_length=20 , max_target_length=__lowercase , ) UpperCamelCase__ : Dict = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCamelCase__ : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) UpperCamelCase__ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) UpperCamelCase__ : List[Any] = tmp_dir.joinpath("train.source" ).open().readlines() UpperCamelCase__ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(__lowercase , __lowercase , 1_28 , __lowercase ) UpperCamelCase__ : Any = {x.name for x in tmp_dir.iterdir()} UpperCamelCase__ : Tuple = {x.name for x in save_dir.iterdir()} UpperCamelCase__ : Optional[Any] = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(__lowercase ) < len(__lowercase ) assert len(__lowercase ) == 1 assert len(packed_examples[0] ) == sum(len(__lowercase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = self._get_dataset(max_len=64 ) UpperCamelCase__ : str = 64 UpperCamelCase__ : Optional[int] = ds.make_dynamic_sampler(__lowercase , required_batch_size_multiple=__lowercase ) UpperCamelCase__ : Dict = [len(__lowercase ) for x in batch_sampler] assert len(set(__lowercase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(__lowercase ) == len(__lowercase ) # no dropped or added 
examples UpperCamelCase__ : Optional[Any] = DataLoader(__lowercase , batch_sampler=__lowercase , collate_fn=ds.collate_fn , num_workers=2 ) UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : List[str] = [] for batch in data_loader: UpperCamelCase__ : Any = batch["input_ids"].shape UpperCamelCase__ : Dict = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple UpperCamelCase__ : str = np.product(batch["input_ids"].shape ) num_src_per_batch.append(__lowercase ) if num_src_tokens > (max_tokens * 1.1): failures.append(__lowercase ) assert num_src_per_batch[0] == max(__lowercase ) if failures: raise AssertionError(F'too many tokens in {len(__lowercase )} batches' ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = self._get_dataset(max_len=5_12 ) UpperCamelCase__ : Optional[Any] = 2 UpperCamelCase__ : Tuple = ds.make_sortish_sampler(__lowercase , shuffle=__lowercase ) UpperCamelCase__ : Optional[Any] = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2 ) UpperCamelCase__ : Union[str, Any] = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowercase ) UpperCamelCase__ : List[str] = tokenizer.pad_token_id def count_pad_tokens(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str]="input_ids" ): return [batch[k].eq(__lowercase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(__lowercase , k="labels" ) ) < sum(count_pad_tokens(__lowercase , k="labels" ) ) assert sum(count_pad_tokens(__lowercase ) ) < sum(count_pad_tokens(__lowercase ) ) assert len(__lowercase ) == len(__lowercase ) def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tuple=10_00 , SCREAMING_SNAKE_CASE : Dict=1_28 ): '''simple docstring''' if os.getenv("USE_REAL_DATA" , __lowercase ): UpperCamelCase__ : str = "examples/seq2seq/wmt_en_ro" UpperCamelCase__ : List[Any] = max_len * 2 * 64 if not Path(__lowercase ).joinpath("train.len" ).exists(): save_len_file(__lowercase , __lowercase ) else: UpperCamelCase__ : Optional[Any] = "examples/seq2seq/test_data/wmt_en_ro" UpperCamelCase__ : Any = max_len * 4 save_len_file(__lowercase , __lowercase ) UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(__lowercase ) UpperCamelCase__ : Any = SeqaSeqDataset( __lowercase , data_dir=__lowercase , type_path="train" , max_source_length=__lowercase , max_target_length=__lowercase , n_obs=__lowercase , ) return ds, max_tokens, tokenizer def __lowercase ( self : int ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Any = self._get_dataset() UpperCamelCase__ : List[Any] = set(DistributedSortishSampler(__lowercase , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__lowercase ) ) UpperCamelCase__ : Any = set(DistributedSortishSampler(__lowercase , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__lowercase ) ) assert idsa.intersection(__lowercase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase ) if tok_name == MBART_TINY: UpperCamelCase__ : Any = SeqaSeqDataset( __lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , 
tgt_lang="FR" , ) UpperCamelCase__ : str = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: UpperCamelCase__ : List[str] = SeqaSeqDataset( __lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) UpperCamelCase__ : List[str] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(__lowercase ) == 1 if tok_name == BART_TINY else len(__lowercase ) == 0
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowercase__ : Dict = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **__lowercase : Union[str, Any] ): """simple docstring""" super().__init__(**__lowercase ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__lowercase ) def snake_case__ ( self : Optional[int] , **__lowercase : Optional[Any] ): """simple docstring""" snake_case_ = {} snake_case_ = {} snake_case_ = {} # preprocess args if "points_per_batch" in kwargs: snake_case_ = kwargs["points_per_batch"] if "points_per_crop" in kwargs: snake_case_ = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: snake_case_ = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: snake_case_ = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: snake_case_ = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: snake_case_ = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: snake_case_ = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: snake_case_ = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: snake_case_ = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: snake_case_ = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: snake_case_ = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: snake_case_ = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Optional[int] , __lowercase : List[str] , *__lowercase : Optional[Any] , __lowercase : Dict=None , __lowercase : List[str]=None , **__lowercase : Optional[Any] ): """simple docstring""" return super().__call__(__lowercase , *__lowercase , num_workers=__lowercase , batch_size=__lowercase , **__lowercase ) def snake_case__ ( self : str , __lowercase : int , __lowercase : List[str]=64 , __lowercase : int = 0 , __lowercase : float = 5_12 / 15_00 , __lowercase : Optional[int] = 32 , __lowercase : Optional[int] = 1 , ): """simple docstring""" snake_case_ = load_image(__lowercase ) snake_case_ = self.image_processor.size["longest_edge"] snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.image_processor.generate_crop_boxes( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) snake_case_ = self.image_processor(images=__lowercase , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": snake_case_ = self.get_inference_context() with inference_context(): snake_case_ = self._ensure_tensor_on_device(__lowercase , device=self.device ) snake_case_ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) snake_case_ = image_embeddings snake_case_ = grid_points.shape[1] snake_case_ = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
" "To return all points at once, set points_per_batch to None" ) for i in range(0 , __lowercase , __lowercase ): snake_case_ = grid_points[:, i : i + points_per_batch, :, :] snake_case_ = input_labels[:, i : i + points_per_batch] snake_case_ = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def snake_case__ ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Union[str, Any]=0.88 , __lowercase : Union[str, Any]=0.95 , __lowercase : int=0 , __lowercase : int=1 , ): """simple docstring""" snake_case_ = model_inputs.pop("input_boxes" ) snake_case_ = model_inputs.pop("is_last" ) snake_case_ = model_inputs.pop("original_sizes" ).tolist() snake_case_ = model_inputs.pop("reshaped_input_sizes" ).tolist() snake_case_ = self.model(**__lowercase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks snake_case_ = model_outputs["pred_masks"] snake_case_ = self.image_processor.post_process_masks( __lowercase , __lowercase , __lowercase , __lowercase , binarize=__lowercase ) snake_case_ = model_outputs["iou_scores"] snake_case_ , snake_case_ , snake_case_ = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __lowercase , __lowercase , __lowercase , __lowercase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def snake_case__ ( self : str , __lowercase : Any , __lowercase : Optional[int]=False , __lowercase : int=False , __lowercase : List[str]=0.7 , ): """simple docstring""" snake_case_ = [] snake_case_ = [] snake_case_ = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) snake_case_ = torch.cat(__lowercase ) snake_case_ = torch.cat(__lowercase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.image_processor.post_process_for_mask_generation( __lowercase , __lowercase , __lowercase , __lowercase ) snake_case_ = defaultdict(__lowercase ) for output in model_outputs: for k, v in output.items(): extra[k].append(__lowercase ) snake_case_ = {} if output_rle_mask: snake_case_ = rle_mask if output_bboxes_mask: snake_case_ = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _A ( ): """simple docstring""" A = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=_a , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=_a , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=_a ) return parser.parse_args() def _A ( ): """simple docstring""" A = parse_args() # Import training_script as a module. A = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) A = script_fpath.stem A = importlib.import_module(_a ) # Patch sys.argv A = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": UpperCAmelCase =argparse.ArgumentParser() parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--txt2img_unclip", default="kakaobrain/karlo-v1-alpha", type=str, required=False, help="The pretrained txt2img unclip.", ) UpperCAmelCase =parser.parse_args() UpperCAmelCase =UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) UpperCAmelCase =CLIPImageProcessor() UpperCAmelCase =CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") UpperCAmelCase =UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits; take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Only part of the item fits; take the fraction that fills the remaining capacity.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
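A classic instance to exercise the function above: the optimum takes items 0 and 1 whole plus two thirds of item 2, for a total value of 240.

best, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], capacity=50)
print(best)       # 240.0
print(fractions)  # [1, 1, 0.6666666666666666]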
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } SCREAMING_SNAKE_CASE__ = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase__ ( ) -> str: """simple docstring""" snake_case = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) snake_case = bs[:] snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCamelCase ) cs.append(2**8 + n ) n += 1 snake_case = [chr(_UpperCamelCase ) for n in cs] return dict(zip(_UpperCamelCase , _UpperCamelCase ) ) def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Union[str, Any]: """simple docstring""" snake_case = set() snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case = char return pairs class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : str = VOCAB_FILES_NAMES _lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="replace" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase=False , **lowerCAmelCase , ): """simple docstring""" snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token super().__init__( errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , ) with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle: snake_case = json.load(lowerCAmelCase ) snake_case = {v: k for k, v in self.encoder.items()} snake_case = errors # how to handle errors in decoding snake_case = bytes_to_unicode() snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase , encoding='utf-8' ) as merges_handle: snake_case = merges_handle.read().split('\n' )[1:-1] snake_case = [tuple(merge.split() ) for merge in bpe_merges] snake_case = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) snake_case = {} snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def snake_case ( self ): """simple docstring""" return len(self.encoder ) def snake_case ( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" if token in self.cache: return self.cache[token] snake_case = tuple(lowerCAmelCase ) snake_case = get_pairs(lowerCAmelCase ) if not pairs: return token while True: snake_case = min(lowerCAmelCase , key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break snake_case ,snake_case = bigram snake_case = [] snake_case = 0 while i < len(lowerCAmelCase ): try: snake_case = word.index(lowerCAmelCase , lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case = j if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case = tuple(lowerCAmelCase ) snake_case = new_word if len(lowerCAmelCase ) == 1: break else: snake_case = get_pairs(lowerCAmelCase ) snake_case = ' '.join(lowerCAmelCase ) snake_case = word return word def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = [] for token in re.findall(self.pat , lowerCAmelCase ): snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(' ' ) ) return bpe_tokens def snake_case ( self , lowerCAmelCase ): """simple docstring""" return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" return self.decoder.get(lowerCAmelCase ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = ''.join(lowerCAmelCase ) snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): 
"""simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) snake_case = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' ) snake_case = 0 with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) snake_case = token_index writer.write(' '.join(lowerCAmelCase ) + '\n' ) index += 1 return vocab_file, merge_file def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1] def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): """simple docstring""" snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , lowerCAmelCase , lowerCAmelCase=False , **lowerCAmelCase ): """simple docstring""" snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase ) > 0 and not text[0].isspace()): snake_case = ' ' + text return (text, kwargs) def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): """simple docstring""" return token_ids_a + [self.eos_token_id] def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(lowerCAmelCase ) snake_case = ' '.join(lowerCAmelCase ) snake_case = self.encode(lowerCAmelCase ) if len(lowerCAmelCase ) > self.model_max_length: snake_case = input_ids[-self.model_max_length :] logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
150
0
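As a quick check of the fractional-knapsack helper above, here is a minimal usage sketch; the item values and weights are made up for illustration, using the cleaned-up signature above.

# Illustrative inputs, not from the original sample.
value = [60, 100, 120]
weight = [10, 20, 30]
capacity = 50

max_value, fractions = fractional_knapsack(value, weight, capacity)
print(max_value)  # 240.0: items 0 and 1 taken whole, then 2/3 of item 2
print(fractions)  # [1, 1, 0.6666666666666666]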
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ): __snake_case : List[str] = StableDiffusionDiffEditPipeline __snake_case : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""} __snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""} __snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __snake_case : Tuple = frozenset([] ) def A ( self : str ): torch.manual_seed(0 ) lowerCAmelCase_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , ) lowerCAmelCase_ : Tuple = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , ) lowerCAmelCase_ : List[Any] = DDIMInverseScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase , set_alpha_to_zero=UpperCAmelCase , ) torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) lowerCAmelCase_ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) lowerCAmelCase_ : int = CLIPTextModel(UpperCAmelCase ) lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ : Dict = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[str]=0 ): lowerCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) if str(UpperCAmelCase ).startswith("""mps""" ): lowerCAmelCase_ : List[Any] = 
torch.manual_seed(UpperCAmelCase ) else: lowerCAmelCase_ : int = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) lowerCAmelCase_ : Optional[Any] = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def A ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0 ): lowerCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) lowerCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ : str = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("""RGB""" ) if str(UpperCAmelCase ).startswith("""mps""" ): lowerCAmelCase_ : int = torch.manual_seed(UpperCAmelCase ) else: lowerCAmelCase_ : Any = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) lowerCAmelCase_ : List[Any] = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def A ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any]=0 ): lowerCAmelCase_ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) lowerCAmelCase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ : Tuple = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("""RGB""" ) if str(UpperCAmelCase ).startswith("""mps""" ): lowerCAmelCase_ : Optional[Any] = torch.manual_seed(UpperCAmelCase ) else: lowerCAmelCase_ : str = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) lowerCAmelCase_ : Tuple = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def A ( self : Any ): if not hasattr(self.pipeline_class , """_optional_components""" ): return lowerCAmelCase_ : List[str] = self.get_dummy_components() lowerCAmelCase_ : Optional[Any] = self.pipeline_class(**UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowerCAmelCase_ : str = self.get_dummy_inputs(UpperCAmelCase ) lowerCAmelCase_ : Any = pipe(**UpperCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase ) lowerCAmelCase_ : List[str] = self.pipeline_class.from_pretrained(UpperCAmelCase ) pipe_loaded.to(UpperCAmelCase ) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase ) for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCAmelCase , UpperCAmelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' 
, ) lowerCAmelCase_ : Any = self.get_dummy_inputs(UpperCAmelCase ) lowerCAmelCase_ : Tuple = pipe_loaded(**UpperCAmelCase )[0] lowerCAmelCase_ : Any = np.abs(output - output_loaded ).max() self.assertLess(UpperCAmelCase , 1e-4 ) def A ( self : Any ): lowerCAmelCase_ : Optional[Any] = """cpu""" lowerCAmelCase_ : Optional[Any] = self.get_dummy_components() lowerCAmelCase_ : Tuple = self.pipeline_class(**UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCAmelCase_ : Tuple = self.get_dummy_mask_inputs(UpperCAmelCase ) lowerCAmelCase_ : List[str] = pipe.generate_mask(**UpperCAmelCase ) lowerCAmelCase_ : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowerCAmelCase_ : Any = np.array([0] * 9 ) lowerCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : int ): lowerCAmelCase_ : Optional[Any] = """cpu""" lowerCAmelCase_ : Optional[int] = self.get_dummy_components() lowerCAmelCase_ : int = self.pipeline_class(**UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCAmelCase_ : Dict = self.get_dummy_inversion_inputs(UpperCAmelCase ) lowerCAmelCase_ : Tuple = pipe.invert(**UpperCAmelCase ).images lowerCAmelCase_ : Any = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCAmelCase_ : List[Any] = np.array( [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , ) lowerCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase , 1e-3 ) def A ( self : str ): super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def A ( self : Tuple ): lowerCAmelCase_ : str = """cpu""" lowerCAmelCase_ : int = self.get_dummy_components() lowerCAmelCase_ : Optional[Any] = {"""beta_start""": 0.0_0085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""} lowerCAmelCase_ : Dict = DPMSolverMultistepScheduler(**UpperCAmelCase ) lowerCAmelCase_ : Any = DPMSolverMultistepInverseScheduler(**UpperCAmelCase ) lowerCAmelCase_ : List[Any] = self.pipeline_class(**UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCAmelCase_ : Tuple = self.get_dummy_inversion_inputs(UpperCAmelCase ) lowerCAmelCase_ : int = pipe.invert(**UpperCAmelCase ).images lowerCAmelCase_ : str = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCAmelCase_ : List[str] = np.array( [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , ) lowerCAmelCase_ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase , 1e-3 ) @require_torch_gpu @slow class __a ( unittest.TestCase ): def A ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Optional[Any] ): lowerCAmelCase_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) lowerCAmelCase_ : Any = raw_image.convert("""RGB""" ).resize((7_68, 7_68) ) lowerCAmelCase_ : Union[str, Any] = raw_image def A ( self : int ): lowerCAmelCase_ : Any = torch.manual_seed(0 ) lowerCAmelCase_ : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa ) lowerCAmelCase_ : Union[str, Any] = 
DDIMScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase_ : Tuple = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCAmelCase_ : Tuple = """a bowl of fruit""" lowerCAmelCase_ : Any = """a bowl of pears""" lowerCAmelCase_ : Dict = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , ) lowerCAmelCase_ : int = pipe.invert( prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase ).latents lowerCAmelCase_ : str = pipe( prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] lowerCAmelCase_ : Optional[Any] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def A ( self : Union[str, Any] ): lowerCAmelCase_ : Dict = torch.manual_seed(0 ) lowerCAmelCase_ : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa ) lowerCAmelCase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase_ : int = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCAmelCase_ : int = """a bowl of fruit""" lowerCAmelCase_ : Dict = """a bowl of pears""" lowerCAmelCase_ : Dict = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , ) lowerCAmelCase_ : List[Any] = pipe.invert( prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase , num_inference_steps=25 , ).latents lowerCAmelCase_ : Optional[Any] = pipe( prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] lowerCAmelCase_ : Optional[int] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
359
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ : Optional[int] = """huggingface/label-files""" lowerCAmelCase_ : int = """imagenet-1k-id2label.json""" lowerCAmelCase_ : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCAmelCase_ : Tuple = {int(lowercase__ ): v for k, v in idalabel.items()} lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()} lowerCAmelCase_ : Optional[Any] = """std_conv""" if """bit""" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowerCAmelCase_ : Tuple = BitConfig( conv_layer=lowercase__ , num_labels=1000 , idalabel=lowercase__ , labelaid=lowercase__ , ) return config def __UpperCamelCase ( lowercase__ : List[Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: lowerCAmelCase_ : str = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCAmelCase_ : Tuple = name.replace("""blocks""" , """layers""" ) if "head.fc" in name: lowerCAmelCase_ : Dict = name.replace("""head.fc""" , """classifier.1""" ) if name.startswith("""norm""" ): lowerCAmelCase_ : List[str] = """bit.""" + name if "bit" not in name and "classifier" not in name: lowerCAmelCase_ : Any = """bit.encoder.""" + name return name def __UpperCamelCase ( ) -> str: '''simple docstring''' lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ : List[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im @torch.no_grad() def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Any=False ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ : Optional[Any] = get_config(lowercase__ ) # load original model from timm lowerCAmelCase_ : str = create_model(lowercase__ , pretrained=lowercase__ ) timm_model.eval() # load state_dict of original model lowerCAmelCase_ : Any = timm_model.state_dict() for key in state_dict.copy().keys(): lowerCAmelCase_ : List[str] = state_dict.pop(lowercase__ ) lowerCAmelCase_ : Dict = val.squeeze() if """head""" in key else val # load HuggingFace model lowerCAmelCase_ : Tuple = BitForImageClassification(lowercase__ ) model.eval() model.load_state_dict(lowercase__ ) # create image processor lowerCAmelCase_ : Tuple = create_transform(**resolve_data_config({} , model=lowercase__ ) ) lowerCAmelCase_ : Union[str, Any] = transform.transforms lowerCAmelCase_ : str = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } lowerCAmelCase_ : List[str] = BitImageProcessor( do_resize=lowercase__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , 
do_center_crop=lowercase__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Tuple = transform(lowercase__ ).unsqueeze(0 ) lowerCAmelCase_ : List[str] = processor(lowercase__ , return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase__ , lowercase__ ) # verify logits with torch.no_grad(): lowerCAmelCase_ : Tuple = model(lowercase__ ) lowerCAmelCase_ : List[str] = outputs.logits print("""Logits:""" , logits[0, :3] ) print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] ) lowerCAmelCase_ : Optional[Any] = timm_model(lowercase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) if push_to_hub: print(f'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(f'ybelkada/{model_name}' ) processor.push_to_hub(f'ybelkada/{model_name}' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
28
0
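The BiT conversion script above is normally driven through its argparse CLI; invoked programmatically it would look roughly like the call below, mirroring the positional call in the script's __main__ block (the dump folder name is illustrative, and the run requires the timm weights to be downloadable).

# Rough programmatic equivalent of:
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-converted
convert_bit_checkpoint("resnetv2_50x1_bitm", "./bit-converted", False)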
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def UpperCamelCase__ ( lowercase__ : Any ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowercase__ ) def UpperCamelCase__ ( lowercase__ : Optional[int] ): from transformers.testing_utils import pytest_terminal_summary_main snake_case : Any = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(lowercase__ , id=lowercase__ )
148
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __A = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __A = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __A = spec.loader.load_module() __A = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)") __A = { "CLIPConfigMixin", "DecisionTransformerConfigMixin", "EncoderDecoderConfigMixin", "RagConfigMixin", "SpeechEncoderDecoderConfigMixin", "VisionEncoderDecoderConfigMixin", "VisionTextDualEncoderConfigMixin", } def UpperCamelCase__ ( ): snake_case : Dict = [] for config_class in list(CONFIG_MAPPING.values() ): snake_case : Tuple = False # source code of `config_class` snake_case : Tuple = inspect.getsource(lowercase__ ) snake_case : Optional[int] = _re_checkpoint.findall(lowercase__ ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` snake_case , snake_case : str = checkpoint # verify the checkpoint name corresponds to the checkpoint link snake_case : Optional[int] = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: snake_case : Any = True break snake_case : Optional[Any] = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(lowercase__ ) if len(lowercase__ ) > 0: snake_case : Optional[Any] = "\n".join(sorted(lowercase__ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
148
1
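The checkpoint regex in the utility above can be exercised in isolation; here is a small sketch, where the docstring snippet is invented for illustration but follows the `[name](link)` convention the check relies on.

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Invented docstring snippet in the expected markdown-link form.
doc = "Instantiating a configuration with the defaults of [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(_re_checkpoint.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]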
"""simple docstring""" import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP __SCREAMING_SNAKE_CASE : str = False try: __SCREAMING_SNAKE_CASE : int = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class __lowerCamelCase : def __init__(self , lowerCamelCase = None , lowerCamelCase = [] ): '''simple docstring''' _lowerCAmelCase = 0 _lowerCAmelCase = choices _lowerCAmelCase = prompt if sys.platform == "win32": _lowerCAmelCase = "*" else: _lowerCAmelCase = "➔ " def A__ (self , lowerCamelCase , lowerCamelCase = "" ): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index] , 32 , lowerCamelCase ) else: forceWrite(self.choices[index] , lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' if index == self.position: forceWrite(f""" {self.arrow_char} """ ) self.write_choice(lowerCamelCase ) else: forceWrite(f""" {self.choices[index]}""" ) reset_cursor() def A__ (self , lowerCamelCase , lowerCamelCase = 1 ): '''simple docstring''' _lowerCAmelCase = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(lowerCamelCase ) move_cursor(lowerCamelCase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def A__ (self ): '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def A__ (self ): '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def A__ (self ): '''simple docstring''' move_cursor(len(self.choices ) - self.position , """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def A__ (self ): '''simple docstring''' move_cursor(len(self.choices ) - self.position , """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(lowerCamelCase )] for number in range(10 )] ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = int(chr(self.current_selection ) ) _lowerCAmelCase = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , lowerCamelCase ) else: return else: return def A__ (self , lowerCamelCase = 0 ): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt , """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" ) _lowerCAmelCase = default_choice for i in range(len(self.choices ) ): self.print_choice(lowerCamelCase ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position , """UP""" ) with cursor.hide(): while True: if in_colab: try: _lowerCAmelCase = int(builtins.input() ) except ValueError: _lowerCAmelCase = default_choice else: _lowerCAmelCase = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , """UP""" ) clear_line() self.write_choice(lowerCamelCase , """\n""" ) return choice
354
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
0
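A couple of sanity checks for the largest-prime-factor solution above (hand-checked values):

print(solution(13195))  # 29, since 13195 = 5 * 7 * 13 * 29
print(solution(17))     # 17: a prime is its own largest prime factor
print(solution())       # 6857 for the default 600851475143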
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
1
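Both `__init__` files above follow the same lazy-import pattern: submodules are resolved only on first attribute access. A simplified sketch of the idea follows; it is not the actual transformers `_LazyModule` implementation, just an illustration of the mechanism the `_import_structure` dict feeds.

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Simplified stand-in for transformers' _LazyModule: resolves attributes on demand."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the defining submodule only now, on first access.
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)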
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCamelCase : str =logging.get_logger(__name__) @add_end_docstrings(A__ ) class __a ( A__ ): def __init__( self : List[str] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Dict , SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' UpperCamelCase__ : List[Any] = {} if "candidate_labels" in kwargs: UpperCamelCase__ : Optional[Any] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: UpperCamelCase__ : int = kwargs["hypothesis_template"] return preprocess_params, {}, {} def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ): '''simple docstring''' UpperCamelCase__ : Dict = load_image(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCamelCase__ : Any = candidate_labels UpperCamelCase__ : Dict = [hypothesis_template.format(SCREAMING_SNAKE_CASE ) for x in candidate_labels] UpperCamelCase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = [text_inputs] return inputs def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ : Tuple = model_inputs.pop("candidate_labels" ) UpperCamelCase__ : List[str] = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , SCREAMING_SNAKE_CASE ): UpperCamelCase__ : Dict = text_inputs[0] else: # Batching case. 
UpperCamelCase__ : Union[str, Any] = text_inputs[0][0] UpperCamelCase__ : Any = self.model(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' UpperCamelCase__ : Optional[int] = model_outputs.pop("candidate_labels" ) UpperCamelCase__ : int = model_outputs["logits"][0] if self.framework == "pt": UpperCamelCase__ : Dict = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCamelCase__ : Optional[Any] = probs.tolist() if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase__ : List[Any] = [scores] elif self.framework == "tf": UpperCamelCase__ : Optional[Any] = stable_softmax(SCREAMING_SNAKE_CASE , axis=-1 ) UpperCamelCase__ : Optional[int] = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) UpperCamelCase__ : Optional[int] = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : -x[0] ) ] return result
196
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Recursive binary search over a sorted list; returns True if item is present."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
196
1
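The interactive block aside, the search above can be sanity-checked directly with non-interactive calls:

assert binary_search([1, 2, 3, 4, 5], 3) is True
assert binary_search([1, 2, 3, 4, 5], 6) is False
assert binary_search([], 1) is False  # the empty list is the recursion's base case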
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): lowerCamelCase__ : Union[str, Any] = ["pixel_values"] def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = True , a = None , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , **a , ) -> None: super().__init__(**a ) lowercase__ : Dict = size if size is not None else {'shortest_edge': 2_5_6} lowercase__ : Union[str, Any] = get_size_dict(a , default_to_square=a ) lowercase__ : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} lowercase__ : Tuple = get_size_dict(a ) lowercase__ : Union[str, Any] = do_resize lowercase__ : List[Any] = size lowercase__ : List[Any] = resample lowercase__ : Any = do_center_crop lowercase__ : Optional[Any] = crop_size lowercase__ : int = do_rescale lowercase__ : int = rescale_factor lowercase__ : Dict = do_normalize lowercase__ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCAmelCase ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray: lowercase__ : int = get_size_dict(a , default_to_square=a ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) lowercase__ : Optional[int] = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a ) return resize(a , size=a , resample=a , data_format=a , **a ) def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> np.ndarray: lowercase__ : List[str] = get_size_dict(a ) return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a ) def _UpperCAmelCase ( self , a , a , a = None , **a ) -> np.ndarray: return rescale(a , scale=a , data_format=a , **a ) def _UpperCAmelCase ( self , a , a , a , a = None , **a , ) -> np.ndarray: return normalize(a , mean=a , std=a , data_format=a , **a ) def _UpperCAmelCase ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> Tuple: lowercase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize lowercase__ : Tuple = size if size is not None else self.size lowercase__ : Tuple = get_size_dict(a , default_to_square=a ) lowercase__ : Optional[int] = resample if resample is not None else self.resample lowercase__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size lowercase__ : Union[str, Any] = get_size_dict(a ) lowercase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : List[Any] = image_mean if image_mean is not None else self.image_mean lowercase__ : Optional[Any] = image_std if image_std is not None else self.image_std lowercase__ : Tuple = make_list_of_images(a ) if not valid_images(a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowercase__ : int = [to_numpy_array(a ) for image in images] if do_resize: lowercase__ : List[Any] = [self.resize(image=a , size=a , resample=a ) for image in images] if do_center_crop: lowercase__ : Any = [self.center_crop(image=a , size=a ) for image in images] if do_rescale: lowercase__ : Any = [self.rescale(image=a , scale=a ) for image in images] if do_normalize: lowercase__ : Any = [self.normalize(image=a , mean=a , std=a ) for image in images] lowercase__ : str = [to_channel_dimension_format(a , a ) for image in images] lowercase__ : int = {'pixel_values': images} return BatchFeature(data=a , tensor_type=a )
77
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def a_ ( _lowerCAmelCase : Callable[[int | float], int | float] , _lowerCAmelCase : int | float , _lowerCAmelCase : int | float , _lowerCAmelCase : int = 100 , ): '''simple docstring''' lowercase__ : Dict = x_start lowercase__ : Union[str, Any] = fnc(_lowerCAmelCase ) lowercase__ : Optional[Any] = 0.0 for _ in range(_lowerCAmelCase ): # Approximates curve as a sequence of linear lines and sums their length lowercase__ : Union[str, Any] = (x_end - x_start) / steps + xa lowercase__ : Union[str, Any] = fnc(_lowerCAmelCase ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowercase__ : Union[str, Any] = xa lowercase__ : int = fxa return length if __name__ == "__main__": def a_ ( _lowerCAmelCase : List[Any] ): '''simple docstring''' return math.sin(10 * x ) print("f(x) = sin(10 * x)") print("The length of the curve from x = -10 to x = 10 is:") _UpperCamelCase : str = 10 while i <= 10_00_00: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
77
1
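A quick correctness check for line_length above: on a straight line the chord approximation is exact, so a segment from (0, 0) to (3, 4) should come out at length 5.

# Straight line y = 4x/3 over [0, 3]; exact length is hypot(3, 4) = 5.
print(line_length(lambda x: 4 * x / 3, 0, 3, 100))  # ~5.0 up to float rounding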
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __a: str = logging.get_logger(__name__) __a: Tuple = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } __a: Optional[int] = { """b0""": { """hidden_dim""": 12_80, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_24, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 12_80, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_40, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 14_08, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_60, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 15_36, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_00, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 17_92, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_80, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 20_48, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_56, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 23_04, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_28, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 25_60, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_00, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : List[str] = EfficientNetConfig() lowercase__ : Union[str, Any] = CONFIG_MAP[model_name]['''hidden_dim'''] lowercase__ : Any = CONFIG_MAP[model_name]['''width_coef'''] lowercase__ : Union[str, Any] = CONFIG_MAP[model_name]['''depth_coef'''] lowercase__ : Any = CONFIG_MAP[model_name]['''image_size'''] lowercase__ : List[Any] = CONFIG_MAP[model_name]['''dropout_rate'''] lowercase__ : Union[str, Any] = CONFIG_MAP[model_name]['''dw_padding'''] lowercase__ : int = '''huggingface/label-files''' lowercase__ : List[Any] = '''imagenet-1k-id2label.json''' lowercase__ : int = 1000 lowercase__ : int = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()} lowercase__ : Any = idalabel lowercase__ : Any = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase ( ): lowercase__ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ : str = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : Dict = CONFIG_MAP[model_name]['''image_size'''] lowercase__ : Optional[int] = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , 
image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=UpperCAmelCase , ) return preprocessor def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : Any = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] lowercase__ : str = sorted(set(UpperCAmelCase ) ) lowercase__ : Optional[int] = len(UpperCAmelCase ) lowercase__ : Dict = {b: str(UpperCAmelCase ) for b, i in zip(UpperCAmelCase , range(UpperCAmelCase ) )} lowercase__ : Union[str, Any] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: lowercase__ : Any = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) 
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) lowercase__ : str = {} for item in rename_keys: if item[0] in original_param_names: lowercase__ : Tuple = '''efficientnet.''' + item[1] lowercase__ : Any = '''classifier.weight''' lowercase__ : Optional[int] = '''classifier.bias''' return key_mapping def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): for key, value in tf_params.items(): if "normalization" in key: continue lowercase__ : Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: lowercase__ : Dict = torch.from_numpy(UpperCAmelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowercase__ : Union[str, Any] = torch.from_numpy(UpperCAmelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowercase__ : Any = torch.from_numpy(np.transpose(UpperCAmelCase ) ) else: lowercase__ : str = torch.from_numpy(UpperCAmelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(UpperCAmelCase ) @torch.no_grad() def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowercase__ : List[str] = model_classes[model_name]( include_top=UpperCAmelCase , weights='''imagenet''' , input_tensor=UpperCAmelCase , input_shape=UpperCAmelCase , pooling=UpperCAmelCase , classes=1000 , classifier_activation='''softmax''' , ) lowercase__ : List[Any] = original_model.trainable_variables lowercase__ : List[str] = original_model.non_trainable_variables lowercase__ : Tuple = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowercase__ : Dict = param.numpy() lowercase__ : Union[str, Any] = list(tf_params.keys() ) # Load HuggingFace model lowercase__ : Optional[int] = get_efficientnet_config(UpperCAmelCase ) lowercase__ : List[Any] = EfficientNetForImageClassification(UpperCAmelCase ).eval() lowercase__ : Tuple = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) lowercase__ : Optional[Any] = rename_keys(UpperCAmelCase ) replace_params(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Initialize preprocessor and preprocess input image lowercase__ : List[str] = convert_image_processor(UpperCAmelCase ) lowercase__ : int = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): lowercase__ : Tuple = hf_model(**UpperCAmelCase ) lowercase__ : Optional[int] = outputs.logits.detach().numpy() # Original model inference lowercase__ : str = False lowercase__ : List[Any] = CONFIG_MAP[model_name]['''image_size'''] lowercase__ : List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowercase__ : Any = image.img_to_array(UpperCAmelCase ) lowercase__ : Optional[Any] = np.expand_dims(UpperCAmelCase , axis=0 ) lowercase__ : List[Any] = original_model.predict(UpperCAmelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(UpperCAmelCase ): os.mkdir(UpperCAmelCase ) # Save converted model and image processor hf_model.save_pretrained(UpperCAmelCase ) preprocessor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) lowercase__ : Any = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(UpperCAmelCase ) hf_model.push_to_hub(UpperCAmelCase ) if __name__ == "__main__": __a: Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") __a: int = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
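# The permutations used in replace_params above follow from the differing
# kernel layouts of the two frameworks; a minimal, self-contained sketch of
# the idea (shapes are illustrative, not taken from the script):
import numpy as np
import torch

# TF stores conv kernels as (height, width, in_channels, out_channels)
tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)

# PyTorch expects (out_channels, in_channels, height, width)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)

# TF depthwise kernels are (height, width, channels, depth_multiplier),
# which is why the script permutes those with (2, 3, 0, 1) instead.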
'''simple docstring'''


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(1, n):
        # If this activity has start time greater than or equal to the finish
        # time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
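# A quick sanity check of the greedy rule above, returning indices instead of
# printing (a sketch; it assumes the inputs are already sorted by finish time):
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]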
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __snake_case : str = logging.get_logger(__name__) def _UpperCAmelCase ( a__ , a__=False): '''simple docstring''' a_ : Dict = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''')) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''')) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''')) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''')) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''')) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''')) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''')) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''')) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''')) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''')) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ]) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ]) # if just the base model, we should remove "vit" from all keys that start with "vit" a_ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ]) return rename_keys def _UpperCAmelCase ( a__ , a__ , a__=False): '''simple docstring''' for i in range(config.num_hidden_layers): if base_model: a_ : Tuple = """""" else: a_ : Any = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a_ : Dict = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''') a_ : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict a_ : Any = in_proj_weight[ : config.hidden_size, : ] a_ : List[Any] = in_proj_bias[: config.hidden_size] a_ : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a_ : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a_ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] a_ : Dict = 
in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( a__): '''simple docstring''' a_ : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__) def _UpperCAmelCase ( a__ , a__ , a__): '''simple docstring''' a_ : List[str] = dct.pop(A__) a_ : Tuple = val def _UpperCAmelCase ( ): '''simple docstring''' a_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" a_ : str = Image.open(requests.get(A__ , stream=A__).raw) return im @torch.no_grad() def _UpperCAmelCase ( a__ , a__): '''simple docstring''' a_ : int = ViTConfig() a_ : int = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": a_ : int = True a_ : List[str] = int(vit_name[-1_2:-1_0]) a_ : List[Any] = int(vit_name[-9:-6]) else: a_ : Dict = 1_0_0_0 a_ : Dict = """huggingface/label-files""" a_ : Optional[int] = """imagenet-1k-id2label.json""" a_ : Optional[int] = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""") , """r""")) a_ : Optional[int] = {int(A__): v for k, v in idalabel.items()} a_ : Optional[int] = idalabel a_ : List[str] = {v: k for k, v in idalabel.items()} a_ : Optional[int] = int(vit_name[-6:-4]) a_ : List[Any] = int(vit_name[-3:]) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny"""): a_ : List[Any] = 1_9_2 a_ : int = 7_6_8 a_ : Optional[Any] = 1_2 a_ : List[Any] = 3 elif vit_name[9:].startswith("""small"""): a_ : str = 3_8_4 a_ : Any = 1_5_3_6 a_ : Optional[int] = 1_2 a_ : Optional[int] = 6 else: pass else: if vit_name[4:].startswith("""small"""): a_ : Any = 7_6_8 a_ : Dict = 2_3_0_4 a_ : Any = 8 a_ : int = 8 elif vit_name[4:].startswith("""base"""): pass elif vit_name[4:].startswith("""large"""): a_ : Union[str, Any] = 1_0_2_4 a_ : Union[str, Any] = 4_0_9_6 a_ : int = 2_4 a_ : Optional[Any] = 1_6 elif vit_name[4:].startswith("""huge"""): a_ : Optional[int] = 1_2_8_0 a_ : str = 5_1_2_0 a_ : Any = 3_2 a_ : Union[str, Any] = 1_6 # load original model from timm a_ : int = timm.create_model(A__ , pretrained=A__) timm_model.eval() # load state_dict of original model, remove and rename some keys a_ : List[Any] = timm_model.state_dict() if base_model: remove_classification_head_(A__) a_ : Any = create_rename_keys(A__ , A__) for src, dest in rename_keys: rename_key(A__ , A__ , A__) read_in_q_k_v(A__ , A__ , A__) # load HuggingFace model if vit_name[-5:] == "in21k": a_ : List[Any] = ViTModel(A__).eval() else: a_ : Any = ViTForImageClassification(A__).eval() model.load_state_dict(A__) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: a_ : Optional[Any] = DeiTImageProcessor(size=config.image_size) else: a_ : List[Any] = ViTImageProcessor(size=config.image_size) a_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") a_ : int = encoding["""pixel_values"""] a_ : Union[str, Any] = model(A__) if base_model: a_ : List[str] = timm_model.forward_features(A__) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(A__ , outputs.pooler_output , atol=1e-3) else: a_ : List[str] = timm_model(A__) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1e-3) Path(A__).mkdir(exist_ok=A__) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''') model.save_pretrained(A__) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(A__) if __name__ == "__main__": 
__snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __snake_case : Tuple = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
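# The slicing in read_in_q_k_v above works because timm stores the fused
# attention projection as a single (3 * hidden_size, hidden_size) matrix with
# the query, key and value rows stacked in that order; a toy illustration:
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)

q_w = in_proj_weight[:hidden, :]              # query rows
k_w = in_proj_weight[hidden : 2 * hidden, :]  # key rows
v_w = in_proj_weight[-hidden:, :]             # value rows
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)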
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Tuple = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
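# The _LazyModule registered above defers the heavy torch imports until an
# attribute is actually accessed; a stripped-down sketch of the idea (this is
# an illustration, not the transformers implementation):
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # Import the defining submodule only on first access.
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(module, attr)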
'''simple docstring''' from ..utils import DummyObject, requires_backends class snake_case__ ( metaclass=__SCREAMING_SNAKE_CASE ): """simple docstring""" lowerCamelCase = ["""note_seq"""] def __init__( self : List[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> str: """simple docstring""" requires_backends(self , ['''note_seq'''] ) @classmethod def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''note_seq'''] ) @classmethod def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''note_seq'''] )
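# Dummy classes like the one above let `import diffusers` succeed without
# note_seq installed and fail only when the object is actually used; a
# simplified sketch of the metaclass trick (illustrative, not the diffusers source):
class MinimalDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the `note_seq` library, which is not installed.")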
'''simple docstring'''


def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be tiled with grey unit
    squares and coloured tiles of length two, three or four."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
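# A sanity check of the recurrence above: the first coloured tile of length
# tile_length placed at offset tile_start leaves a suffix of length
# row_length - tile_start - tile_length, and the small values below agree
# with the cases quoted for this tiling problem (Project Euler 117):
assert [solution(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]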
import comet # From: unbabel-comet import torch import datasets a_ = datasets.logging.get_logger(__name__) a_ = """\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel's Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = \"{COMET}: A Neural Framework for {MT} Evaluation\", author = \"Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon\", booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\", month = nov, year = \"2020\", address = \"Online\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\", pages = \"2685--2702\", } """ a_ = """\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. """ a_ = """ COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric('comet') >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"] >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"] >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results[\"scores\"]]) [0.19, 0.92] """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence''' ), '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if self.config_name == "default": __lowerCamelCase = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: __lowerCamelCase = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ): '''simple docstring''' if gpus is None: __lowerCamelCase = 1 if torch.cuda.is_available() else 0 __lowerCamelCase = {'src': sources, 'mt': predictions, 'ref': references} __lowerCamelCase = [dict(zip(_A , _A ) ) for t in zip(*data.values() )] __lowerCamelCase = self.scorer.predict(_A , gpus=_A , progress_bar=_A ) return {"mean_score": mean_score, "scores": scores}
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any: """simple docstring""" snake_case_ : Optional[int] = parent snake_case_ : Tuple = batch_size snake_case_ : List[Any] = image_size snake_case_ : List[str] = patch_size snake_case_ : List[str] = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : Any = use_labels snake_case_ : Tuple = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : List[Any] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : Tuple = type_sequence_label_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[Any] = mask_ratio snake_case_ : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ : Optional[int] = (image_size // patch_size) ** 2 snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Union[str, Any] = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self : int ) -> Optional[Any]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : 
str ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A ) snake_case_ : str = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : Any = TFViTMAEForPreTraining(_A ) snake_case_ : Optional[Any] = model(_A , training=_A ) # expected sequence length = num_patches snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2 snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images snake_case_ : str = 1 snake_case_ : Dict = TFViTMAEForPreTraining(_A ) snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : List[str] = model(_A , training=_A ) snake_case_ : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ : List[Any] = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs snake_case_ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} __magic_name__: Dict = False __magic_name__: Dict = False __magic_name__: List[Any] = False __magic_name__: Dict = False def UpperCAmelCase_ ( self : Any ) -> List[Any]: """simple docstring""" snake_case_ : List[Any] = TFViTMAEModelTester(self ) snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(_A ) snake_case_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Dict = [*signature.parameters.keys()] snake_case_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: 
"""simple docstring""" snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A ) snake_case_ : List[str] = model(_A , noise=_A ) snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) ) snake_case_ : str = model(**_A , noise=_A ) snake_case_ : Union[str, Any] = outputs_dict[0].numpy() snake_case_ : Optional[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_A : int ): snake_case_ : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(_A ): snake_case_ : str = v.numpy() else: snake_case_ : Optional[Any] = np.array(_A ) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ : int = model_class(_A ) snake_case_ : List[Any] = self._prepare_for_class(_A , _A ) snake_case_ : Any = prepare_numpy_arrays(_A ) snake_case_ : List[Any] = model(_A , noise=_A ) snake_case_ : List[Any] = model(**_A , noise=_A ) self.assert_outputs_same(_A , _A ) def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]: """simple docstring""" np.random.seed(2 ) snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.constant(_A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ : Optional[Any] = tf_noise super().check_pt_tf_models(_A , _A , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_A ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(_A , _A ),) if isinstance(_A , _A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_A , '_keras_serializable' , _A ) } snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.convert_to_tensor(_A ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: snake_case_ : Optional[Any] = main_layer_class(_A ) snake_case_ : List[str] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) ) snake_case_ : int = model(_A ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' ) model.save(_A ) snake_case_ : str = tf.keras.models.load_model( _A , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_A , tf.keras.Model ) snake_case_ : List[str] = model(_A ) self.assert_outputs_same(_A , _A ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A ) snake_case_ : int = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Any = outputs.last_hidden_state.numpy() snake_case_ : Optional[int] = 0 else: snake_case_ : str = outputs.logits.numpy() snake_case_ : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) snake_case_ : Any = model_class.from_pretrained(_A ) snake_case_ : Any = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Dict = after_outputs['last_hidden_state'].numpy() snake_case_ : Dict = 0 else: snake_case_ : Any = after_outputs['logits'].numpy() snake_case_ : Optional[Any] = 0 snake_case_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : str = model_class(_A ) snake_case_ : int = self._prepare_for_class(_A , _A ) snake_case_ : str = model(_A , noise=_A ) snake_case_ : Dict = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_A ) snake_case_ : Any = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config snake_case_ : str = model_class.from_config(model.config ) snake_case_ : Union[str, Any] = new_model(_A ) # Build model new_model.set_weights(model.get_weights() ) snake_case_ : List[str] = new_model(_A , noise=_A ) self.assert_outputs_same(_A , _A ) @unittest.skip( 
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @slow def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(_A ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Dict = prepare_img() snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) snake_case_ : int = ViTMAEConfig() snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass snake_case_ : Optional[Any] = model(**_A , noise=_A ) # verify the logits snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _A ) snake_case_ : Any = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
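# The tests above feed an explicit `noise` array because ViTMAE derives its
# random patch mask from it; a sketch of the argsort-based masking, with
# illustrative shapes, showing why fixing the noise fixes the mask:
import numpy as np

batch, num_patches, mask_ratio = 2, 196, 0.75
len_keep = int(num_patches * (1 - mask_ratio))

noise = np.random.uniform(size=(batch, num_patches))
ids_shuffle = np.argsort(noise, axis=1)  # ascending: smallest noise is kept
ids_keep = ids_shuffle[:, :len_keep]     # indices of the visible patches
assert ids_keep.shape == (batch, len_keep)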
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # sa and ca are sin(2*theta) and cos(2*theta) for tan(theta) = normal_gradient
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"""{solution() = }""")
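# The sa/ca pair above comes from the double-angle identities: with
# tan(theta) = normal_gradient, sa = sin(2*theta) and ca = cos(2*theta), so the
# closed form equals tan(2*theta - arctan(incoming_gradient)); a numeric check
# with illustrative gradients:
from math import atan, isclose, tan

m_normal, m_in = 0.3, -1.2
sa = 2 * m_normal / (1 + m_normal * m_normal)
ca = (1 - m_normal * m_normal) / (1 + m_normal * m_normal)
closed_form = (sa - ca * m_in) / (ca + sa * m_in)
assert isclose(closed_form, tan(2 * atan(m_normal) - atan(m_in)))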
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Union[str, Any] = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os from typing import Dict, List, Tuple, TypeVar, Union __lowerCAmelCase = TypeVar('''T''') __lowerCAmelCase = Union[List[T], Tuple[T, ...]] __lowerCAmelCase = Union[T, List[T], Dict[str, T]] __lowerCAmelCase = Union[str, bytes, os.PathLike]
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class __a ( __UpperCamelCase , unittest.TestCase ): __lowercase : str = CpmAntTokenizer __lowercase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' super().setUp() lowercase__: Any = [ '<d>', '</d>', '<s>', '</s>', '</_>', '<unk>', '<pad>', '</n>', '我', '是', 'C', 'P', 'M', 'A', 'n', 't', ] lowercase__: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) @tooslow def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Optional[int] = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' ) lowercase__: Optional[Any] = '今天天气真好!' lowercase__: str = ['今天', '天气', '真', '好', '!'] lowercase__: Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: List[str] = '今天天气真好!' lowercase__: List[str] = [tokenizer.bos_token] + tokens lowercase__: Tuple = [6, 9_802, 14_962, 2_082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) lowercase__: Any = tokenizer.decode(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = 1 __magic_name__ : List[Any] = 3 __magic_name__ : Tuple = (32, 32) __magic_name__ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE ) return image @property def SCREAMING_SNAKE_CASE ( self ): torch.manual_seed(0 ) __magic_name__ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def SCREAMING_SNAKE_CASE ( self ): torch.manual_seed(0 ) __magic_name__ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE ( self ): torch.manual_seed(0 ) __magic_name__ : str = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , ) return RobertaSeriesModelWithTransformation(_SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE ( self ): def extract(*_a , **_a ): class _snake_case : def __init__( self ): __magic_name__ : Tuple = torch.ones([0] ) def SCREAMING_SNAKE_CASE ( self , _a ): self.pixel_values.to(_SCREAMING_SNAKE_CASE ) return self return Out() return extract def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator __magic_name__ : List[Any] = self.dummy_cond_unet __magic_name__ : Optional[Any] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE ) __magic_name__ : List[Any] = self.dummy_vae __magic_name__ : int = self.dummy_text_encoder __magic_name__ : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) __magic_name__ : List[str] = 77 __magic_name__ : List[Any] = self.dummy_image.to(_SCREAMING_SNAKE_CASE ) __magic_name__ : Any = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk __magic_name__ : Union[str, Any] = AltDiffusionImgaImgPipeline( unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) __magic_name__ : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_SCREAMING_SNAKE_CASE ) __magic_name__ : Optional[int] = alt_pipe.to(_SCREAMING_SNAKE_CASE ) 
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __magic_name__ : int = "A painting of a squirrel eating a burger" __magic_name__ : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __magic_name__ : Union[str, Any] = alt_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_SCREAMING_SNAKE_CASE , ) __magic_name__ : Dict = output.images __magic_name__ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __magic_name__ : Optional[Any] = alt_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0] __magic_name__ : List[Any] = image[0, -3:, -3:, -1] __magic_name__ : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __magic_name__ : Union[str, Any] = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = self.dummy_cond_unet __magic_name__ : Optional[int] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE ) __magic_name__ : Tuple = self.dummy_vae __magic_name__ : Any = self.dummy_text_encoder __magic_name__ : Dict = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) __magic_name__ : Dict = 77 __magic_name__ : Tuple = self.dummy_image.to(_SCREAMING_SNAKE_CASE ) # put models in fp16 __magic_name__ : Union[str, Any] = unet.half() __magic_name__ : Optional[Any] = vae.half() __magic_name__ : Tuple = bert.half() # make sure here that pndm scheduler skips prk __magic_name__ : List[str] = AltDiffusionImgaImgPipeline( unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) __magic_name__ : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_SCREAMING_SNAKE_CASE ) __magic_name__ : Dict = alt_pipe.to(_SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __magic_name__ : Union[str, Any] = "A painting of a squirrel eating a burger" __magic_name__ : List[str] = torch.manual_seed(0 ) __magic_name__ : Dict = alt_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="np" , image=_SCREAMING_SNAKE_CASE , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 __magic_name__ : List[str] = init_image.resize((760, 504) ) __magic_name__ : Optional[int] = "BAAI/AltDiffusion" __magic_name__ : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained( _SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() __magic_name__ : 
Union[str, Any] = "A fantasy landscape, trending on artstation" __magic_name__ : Union[str, Any] = torch.manual_seed(0 ) __magic_name__ : List[str] = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , ) __magic_name__ : Tuple = output.images[0] __magic_name__ : Dict = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) __magic_name__ : Tuple = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __magic_name__ : Optional[Any] = init_image.resize((768, 512) ) __magic_name__ : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) __magic_name__ : List[Any] = "BAAI/AltDiffusion" __magic_name__ : Dict = AltDiffusionImgaImgPipeline.from_pretrained( _SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() __magic_name__ : int = "A fantasy landscape, trending on artstation" __magic_name__ : Optional[Any] = torch.manual_seed(0 ) __magic_name__ : List[Any] = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , ) __magic_name__ : Dict = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1e-2
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _snake_case ( snake_case ): def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_a , "tf_padding" ) ) self.parent.assertTrue(hasattr(_a , "depth_multiplier" ) ) class _snake_case : def __init__( self , _a , _a=13 , _a=3 , _a=32 , _a=0.25 , _a=8 , _a=True , _a=1_024 , _a=32 , _a="relu6" , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , ): __magic_name__ : Optional[int] = parent __magic_name__ : Union[str, Any] = batch_size __magic_name__ : Tuple = num_channels __magic_name__ : Tuple = image_size __magic_name__ : Any = depth_multiplier __magic_name__ : Any = min_depth __magic_name__ : Any = tf_padding __magic_name__ : int = int(last_hidden_size * depth_multiplier ) __magic_name__ : Any = output_stride __magic_name__ : Tuple = hidden_act __magic_name__ : Optional[int] = classifier_dropout_prob __magic_name__ : Any = use_labels __magic_name__ : str = is_training __magic_name__ : List[str] = num_labels __magic_name__ : str = initializer_range __magic_name__ : Union[str, Any] = scope def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ : Any = None __magic_name__ : Optional[Any] = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.num_labels ) __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __magic_name__ : List[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def SCREAMING_SNAKE_CASE ( self ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ): __magic_name__ : List[str] = MobileNetVaModel(config=_a ) model.to(_a ) model.eval() __magic_name__ : Optional[int] = model(_a ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ): __magic_name__ : Any = self.num_labels __magic_name__ : Dict = MobileNetVaForImageClassification(_a ) model.to(_a ) model.eval() __magic_name__ : Any = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ , 
__magic_name__ : Optional[int] = config_and_inputs __magic_name__ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _snake_case ( snake_case , snake_case , unittest.TestCase ): UpperCamelCase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () UpperCamelCase__ = ( {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[Any] = MobileNetVaModelTester(self ) __magic_name__ : List[str] = MobileNetVaConfigTester(self , config_class=_a , has_text_modality=_a ) def SCREAMING_SNAKE_CASE ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self ): pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE ( self ): pass @unittest.skip(reason="MobileNetV1 does not output attentions" ) def SCREAMING_SNAKE_CASE ( self ): pass def SCREAMING_SNAKE_CASE ( self ): __magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : str = model_class(_a ) __magic_name__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ : Tuple = [*signature.parameters.keys()] __magic_name__ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , _a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def SCREAMING_SNAKE_CASE ( self ): def check_hidden_states_output(_a , _a , _a ): __magic_name__ : Dict = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): __magic_name__ : Tuple = model(**self._prepare_for_class(_a , _a ) ) __magic_name__ : Optional[Any] = outputs.hidden_states __magic_name__ : Optional[Any] = 26 self.assertEqual(len(_a ) , _a ) __magic_name__ , __magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Tuple = True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ : Dict = True check_hidden_states_output(_a , _a , _a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def SCREAMING_SNAKE_CASE ( self ): for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : List[str] = MobileNetVaModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def lowerCAmelCase_ ( ) -> int: '''simple docstring''' __magic_name__ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self ): return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : str = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_a ) 
__magic_name__ : Dict = self.default_image_processor __magic_name__ : List[str] = prepare_img() __magic_name__ : Optional[Any] = image_processor(images=_a , return_tensors="pt" ).to(_a ) # forward pass with torch.no_grad(): __magic_name__ : Union[str, Any] = model(**_a ) # verify the logits __magic_name__ : List[str] = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , _a ) __magic_name__ : Union[str, Any] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class SCREAMING_SNAKE_CASE__ (__snake_case ): __lowerCamelCase : List[Any] = """yolos""" def __init__( self , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1e-12 , a=[512, 864] , a=16 , a=3 , a=True , a=100 , a=True , a=False , a=1 , a=5 , a=2 , a=5 , a=2 , a=0.1 , **a , ): super().__init__(**a) lowercase__ : Union[str, Any] = hidden_size lowercase__ : Optional[int] = num_hidden_layers lowercase__ : List[Any] = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : str = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Optional[Any] = initializer_range lowercase__ : str = layer_norm_eps lowercase__ : Optional[int] = image_size lowercase__ : int = patch_size lowercase__ : Union[str, Any] = num_channels lowercase__ : str = qkv_bias lowercase__ : List[str] = num_detection_tokens lowercase__ : Dict = use_mid_position_embeddings lowercase__ : List[str] = auxiliary_loss # Hungarian matcher lowercase__ : Tuple = class_cost lowercase__ : int = bbox_cost lowercase__ : str = giou_cost # Loss coefficients lowercase__ : Union[str, Any] = bbox_loss_coefficient lowercase__ : List[str] = giou_loss_coefficient lowercase__ : Dict = eos_coefficient class SCREAMING_SNAKE_CASE__ (__snake_case ): __lowerCamelCase : Optional[int] = version.parse("""1.11""" ) @property def snake_case_ ( self): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def snake_case_ ( self): return 1e-4 @property def snake_case_ ( self): return 12
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar snake_case_ = TypeVar('''T''') snake_case_ = TypeVar('''U''') class SCREAMING_SNAKE_CASE__ (Generic[T, U] ): def __init__( self , a , a): lowercase__ : List[Any] = key lowercase__ : List[Any] = val lowercase__ : DoubleLinkedListNode[T, U] | None = None lowercase__ : DoubleLinkedListNode[T, U] | None = None def __repr__( self): return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next)}, has prev: {bool(self.prev)}""" ) class SCREAMING_SNAKE_CASE__ (Generic[T, U] ): def __init__( self): lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(a , a) lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(a , a) lowercase__ , lowercase__ : Union[str, Any] = self.rear, self.head def __repr__( self): lowercase__ : Any = ['DoubleLinkedList'] lowercase__ : List[str] = self.head while node.next is not None: rep.append(str(a)) lowercase__ : Tuple = node.next rep.append(str(self.rear)) return ",\n ".join(a) def snake_case_ ( self , a): lowercase__ : Optional[Any] = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None lowercase__ : Dict = node lowercase__ : int = previous lowercase__ : Union[str, Any] = node lowercase__ : Optional[int] = self.rear def snake_case_ ( self , a): if node.prev is None or node.next is None: return None lowercase__ : Union[str, Any] = node.next lowercase__ : Tuple = node.prev lowercase__ : Union[str, Any] = None lowercase__ : List[Any] = None return node class SCREAMING_SNAKE_CASE__ (Generic[T, U] ): __lowerCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , a): lowercase__ : DoubleLinkedList[T, U] = DoubleLinkedList() lowercase__ : Optional[Any] = capacity lowercase__ : Union[str, Any] = 0 lowercase__ : Tuple = 0 lowercase__ : int = 0 lowercase__ : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self): return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self , a): return key in self.cache def snake_case_ ( self , a): # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 lowercase__ : DoubleLinkedListNode[T, U] = self.cache[key] lowercase__ : str = self.list.remove(self.cache[key]) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(a) return node.val self.miss += 1 return None def snake_case_ ( self , a , a): if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity lowercase__ : Optional[int] = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(a) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 lowercase__ : Optional[Any] = DoubleLinkedListNode(a , a) self.list.add(self.cache[key]) self.num_keys += 1 else: # bump node to the end of the list, update value lowercase__ : Any = self.list.remove(self.cache[key]) assert node is not None # node guaranteed to be in list lowercase__ : Union[str, Any] = value self.list.add(a) @classmethod def snake_case_ ( cls , a = 128): def cache_decorator_inner(a) -> Callable[..., U]: def 
cache_decorator_wrapper(*a) -> U: if func not in cls.decorator_function_to_instance_map: lowercase__ : Dict = LRUCache(a) lowercase__ : str = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: lowercase__ : str = func(*a) cls.decorator_function_to_instance_map[func].put(args[0] , a) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(a , 'cache_info' , a) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
214
1
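For reference, the get/put semantics implemented by the doubly-linked-list LRU cache above can be sketched with collections.OrderedDict. The SimpleLRU name and the tiny capacity below are illustrative only, not part of the source file.

# Minimal sketch of the same LRU behavior: get() bumps a key to most-recent,
# put() evicts the least-recently-used entry once capacity is exceeded.
from collections import OrderedDict

class SimpleLRU:
    def __init__(self, capacity: int = 128) -> None:
        self.capacity = capacity
        self.hits = 0
        self.miss = 0
        self.store: OrderedDict = OrderedDict()

    def get(self, key):
        if key not in self.store:
            self.miss += 1
            return None
        self.hits += 1
        self.store.move_to_end(key)  # mark as most recently used
        return self.store[key]

    def put(self, key, value) -> None:
        if key in self.store:
            self.store.move_to_end(key)
        self.store[key] = value
        if len(self.store) > self.capacity:
            self.store.popitem(last=False)  # evict least recently used

cache = SimpleLRU(capacity=2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")     # "a" becomes most recent
cache.put("c", 3)  # evicts "b"
assert cache.get("b") is None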
import os
from pathlib import Path


def UpperCamelCase_():
    """Build and load the custom multi-scale deformable attention kernels."""
    from torch.utils.cpp_extension import load

    # the list comprehension below references `root`, so the path must keep that name
    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp'),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu'),
        ]
    ]
    load(
        'MultiScaleDeformableAttention',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['-DWITH_CUDA=1'],
        extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
335
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(_UpperCamelCase ) class lowercase ( _UpperCamelCase ): '''simple docstring''' def __init__(self , **__a ) -> Optional[Any]: """simple docstring""" super().__init__(**__a ) requires_backends(self , 'vision' ) requires_backends(self , 'torch' ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__a ) def UpperCamelCase__ (self , **__a ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = {} UpperCAmelCase__ = {} UpperCAmelCase__ = {} # preprocess args if "points_per_batch" in kwargs: UpperCAmelCase__ = kwargs['points_per_batch'] if "points_per_crop" in kwargs: UpperCAmelCase__ = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: UpperCAmelCase__ = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: UpperCAmelCase__ = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: UpperCAmelCase__ = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: UpperCAmelCase__ = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: UpperCAmelCase__ = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: UpperCAmelCase__ = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: UpperCAmelCase__ = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: UpperCAmelCase__ = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: UpperCAmelCase__ = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: UpperCAmelCase__ = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> List[str]: """simple docstring""" return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a ) def UpperCamelCase__ (self , __a , __a=64 , __a = 0 , __a = 512 / 1500 , __a = 32 , __a = 1 , ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = load_image(__a ) UpperCAmelCase__ = self.image_processor.size['longest_edge'] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes( __a , __a , __a , __a , __a , __a ) UpperCAmelCase__ = self.image_processor(images=__a , return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": UpperCAmelCase__ = self.get_inference_context() with inference_context(): UpperCAmelCase__ = self._ensure_tensor_on_device(__a , device=self.device ) UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) UpperCAmelCase__ = image_embeddings UpperCAmelCase__ = grid_points.shape[1] UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
' 'To return all points at once, set points_per_batch to None' ) for i in range(0 , __a , __a ): UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :] UpperCAmelCase__ = input_labels[:, i : i + points_per_batch] UpperCAmelCase__ = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCamelCase__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Dict: """simple docstring""" UpperCAmelCase__ = model_inputs.pop('input_boxes' ) UpperCAmelCase__ = model_inputs.pop('is_last' ) UpperCAmelCase__ = model_inputs.pop('original_sizes' ).tolist() UpperCAmelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist() UpperCAmelCase__ = self.model(**__a ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks UpperCAmelCase__ = model_outputs['pred_masks'] UpperCAmelCase__ = self.image_processor.post_process_masks( __a , __a , __a , __a , binarize=__a ) UpperCAmelCase__ = model_outputs['iou_scores'] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCamelCase__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Dict: """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = [] UpperCAmelCase__ = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) UpperCAmelCase__ = torch.cat(__a ) UpperCAmelCase__ = torch.cat(__a ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation( __a , __a , __a , __a ) UpperCAmelCase__ = defaultdict(__a ) for output in model_outputs: for k, v in output.items(): extra[k].append(__a ) UpperCAmelCase__ = {} if output_rle_mask: UpperCAmelCase__ = rle_mask if output_bboxes_mask: UpperCAmelCase__ = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
335
1
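A hedged usage sketch for the mask-generation pipeline defined above; the checkpoint name, the image URL, and the printed fields are assumptions based on the pipeline's parameter parsing and postprocess return value, not taken from the source.

from transformers import pipeline

# "mask-generation" routes to the ChunkPipeline above; facebook/sam-vit-base
# is one SAM checkpoint that works with it (assumption, not from the source).
generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=-1)
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,    # forwarded to preprocess, as parsed above
    pred_iou_thresh=0.88,   # forwarded to postprocess
)
print(len(outputs["masks"]), "masks; first scores:", outputs["scores"][:3])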
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) snake_case_ : Any = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[str] = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
'''simple docstring'''
def A__(numerator: int = 1, digit: int = 1_0_0_0) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # a remainder repeated: the cycle is complete
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 1_0 % divide_by_number
    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
83
1
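The recurring-cycle search above can be cross-checked with a number-theoretic shortcut: after removing factors of 2 and 5, the cycle length of 1/d is the multiplicative order of 10 modulo d. The cycle_length helper below is an independent sketch, not part of the source file.

def cycle_length(d: int) -> int:
    # factors of 2 and 5 only add a non-repeating prefix to the decimal
    while d % 2 == 0:
        d //= 2
    while d % 5 == 0:
        d //= 5
    if d == 1:
        return 0
    # smallest k with 10**k == 1 (mod d), i.e. the multiplicative order of 10
    k, r = 1, 10 % d
    while r != 1:
        r = (r * 10) % d
        k += 1
    return k

assert cycle_length(7) == 6  # 1/7 = 0.(142857)
assert max(range(2, 1000), key=cycle_length) == 983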
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCamelCase__ =argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCamelCase__ =parser.parse_args() if args.model_type == "bert": UpperCamelCase__ =BertForMaskedLM.from_pretrained(args.model_name) UpperCamelCase__ ="bert" else: raise ValueError('args.model_type should be \"bert\".') UpperCamelCase__ =model.state_dict() UpperCamelCase__ ={} for w in ["word_embeddings", "position_embeddings"]: UpperCamelCase__ =state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCamelCase__ =state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCamelCase__ =0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCamelCase__ =state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCamelCase__ =state_dict["cls.predictions.decoder.weight"] UpperCamelCase__ =state_dict["cls.predictions.bias"] if args.vocab_transform: for w in ["weight", "bias"]: UpperCamelCase__ =state_dict[f"cls.predictions.transform.dense.{w}"] UpperCamelCase__ =state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
371
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class lowerCAmelCase__( __lowercase , __lowercase ): '''simple docstring''' @register_to_config def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int: super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential( nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , ) _SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList() for lyr_num in range(__lowerCamelCase ): # FiLM conditional T5 decoder _SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) self.decoders.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to( torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase ) inputs += position_encodings _SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase ) # decoder: No padding present. _SCREAMING_SNAKE_CASE : Any = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings _SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _SCREAMING_SNAKE_CASE : Optional[Any] = lyr( __lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0] _SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase ) return spec_out class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict: super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : int = self.layer[0]( __lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , ) if encoder_hidden_states is not None: _SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _SCREAMING_SNAKE_CASE : Tuple = self.layer[1]( __lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , ) # Apply Film Conditional Feed Forward layer _SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase ) return (hidden_states,) class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]: # pre_self_attention_layer_norm _SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: _SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase ) # Self-attention block _SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = 
hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: super().__init__() _SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]: _SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.attention( __lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase ) return layer_output class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation() def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any: _SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear _SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int: super().__init__() _SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : str = eps def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as 
Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) )) class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 ) _SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift return x
325
0
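The TaLayerNorm in the decoder above is a scale-only RMS norm (no mean subtraction, no bias), with the variance accumulated in fp32. A minimal standalone restatement, with illustrative tensor shapes, is:

import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # variance computed in fp32 for numerical stability, as in the class above
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return weight * x.to(weight.dtype)

x = torch.randn(2, 5, 8)
w = torch.ones(8)
out = rms_norm(x, w)
# each position now has (roughly) unit root-mean-square
print(out.pow(2).mean(-1))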
"""simple docstring""" import enum import shutil import sys __a , __a = shutil.get_terminal_size() __a = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} class lowerCamelCase ( enum.Enum ): '''simple docstring''' _A : Dict = 0 _A : str = 1 def A_ ( _lowercase, _lowercase="" ): '''simple docstring''' sys.stdout.write(str(_lowercase ) + end ) sys.stdout.flush() def A_ ( _lowercase, _lowercase, _lowercase="" ): '''simple docstring''' forceWrite(f"""\u001b[{color}m{content}\u001b[0m""", _lowercase ) def A_ ( ): '''simple docstring''' forceWrite("""\r""" ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def A_ ( ): '''simple docstring''' forceWrite(""" """ * TERMINAL_WIDTH ) reset_cursor() def A_ ( ): '''simple docstring''' reset_cursor() forceWrite("""-""" * TERMINAL_WIDTH )
66
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class lowerCamelCase : '''simple docstring''' def __init__( self: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = {} def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None: snake_case_ :str = {} def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None: if nodea not in self.connections: self.add_node(snake_case ) if nodea not in self.connections: self.add_node(snake_case ) snake_case_ :Dict = probability def lowerCAmelCase_ ( self: List[Any] ) -> list[str]: return list(self.connections ) def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str: snake_case_ :Optional[Any] = 0 snake_case_ :List[str] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowercase, _lowercase, _lowercase ) snake_case_ :int = Counter(graph.get_nodes() ) snake_case_ :Optional[Any] = start for _ in range(_lowercase ): snake_case_ :Tuple = graph.transition(_lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
66
1
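The random-walk logic of the Markov chain above, restated with ordinary names and a made-up transition table for illustration:

from collections import Counter
from random import random

# illustrative transition probabilities; each row sums to 1
transitions = {
    "a": {"a": 0.9, "b": 0.075, "c": 0.025},
    "b": {"a": 0.15, "b": 0.8, "c": 0.05},
    "c": {"a": 0.25, "b": 0.25, "c": 0.5},
}

def step(node: str) -> str:
    threshold, cumulative = random(), 0.0
    for dest, p in transitions[node].items():
        cumulative += p
        if cumulative > threshold:
            return dest
    return node

visited: Counter = Counter()
node = "a"
for _ in range(10_000):
    node = step(node)
    visited[node] += 1
print(visited.most_common())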
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) class lowercase__ ( _UpperCAmelCase ): A__ : List[str] =["""pixel_values"""] def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : List[Any] , ): super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = size if size is not None else {'height': 384, 'width': 384} SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = do_resize SCREAMING_SNAKE_CASE__ = size SCREAMING_SNAKE_CASE__ = resample SCREAMING_SNAKE_CASE__ = do_rescale SCREAMING_SNAKE_CASE__ = rescale_factor SCREAMING_SNAKE_CASE__ = do_normalize SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE__ = do_convert_rgb def A_ ( self : Any , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ): SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}' ) SCREAMING_SNAKE_CASE__ = (size['height'], size['width']) return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def A_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ): return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def A_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : str , ): return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def A_ ( self : Optional[int] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : str , ): SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE__ = size if size is not None else self.size SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE__ = [convert_to_rgb(UpperCAmelCase_ ) for image in images] # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE__ = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE__ = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images] SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images] SCREAMING_SNAKE_CASE__ = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCAmelCase_ ) return encoded_outputs
367
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class lowercase__(FlaxModelTesterMixin, unittest.TestCase):
    # the mixin looks these members up by name, so they keep their canonical names
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
169
0
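A usage sketch for the image processor above; reading it as BlipImageProcessor is an assumption based on the 384x384 default size and the OPENAI_CLIP normalization constants, not stated in the source.

import numpy as np
from transformers import BlipImageProcessor

processor = BlipImageProcessor()  # do_resize / do_rescale / do_normalize default to True
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384), channels-first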
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : int=1_3, UpperCAmelCase__ : int=7, UpperCAmelCase__ : Optional[Any]=6, UpperCAmelCase__ : List[str]=1_7, UpperCAmelCase__ : List[str]=2_3, UpperCAmelCase__ : Any=1_1, UpperCAmelCase__ : Dict=True, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = act_dim __lowercase = state_dim __lowercase = hidden_size __lowercase = max_length __lowercase = is_training def _lowercase ( self : Any ): __lowercase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) __lowercase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) __lowercase = floats_tensor((self.batch_size, self.seq_length, 1) ) __lowercase = floats_tensor((self.batch_size, self.seq_length, 1) ) __lowercase = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_0_0_0 ) __lowercase = random_attention_mask((self.batch_size, self.seq_length) ) __lowercase = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def _lowercase ( self : Dict ): return DecisionTransformerConfig( batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, ) def _lowercase ( self : str, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any, UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any], ): __lowercase = DecisionTransformerModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) self.parent.assertEqual(result.state_preds.shape, states.shape ) self.parent.assertEqual(result.action_preds.shape, actions.shape ) self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def _lowercase ( self : int ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class _lowerCAmelCase ( lowercase ,lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = (DecisionTransformerModel,) if is_torch_available() else () __UpperCAmelCase : int = () 
__UpperCAmelCase : Tuple = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids __UpperCAmelCase : str = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features __UpperCAmelCase : str = False __UpperCAmelCase : str = False __UpperCAmelCase : int = False __UpperCAmelCase : Any = False __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Any = False __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = False def _lowercase ( self : Dict ): __lowercase = DecisionTransformerModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : str ): self.config_tester.run_common_tests() def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def _lowercase ( self : int ): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = DecisionTransformerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(UpperCAmelCase__ ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(UpperCAmelCase__ )], UpperCAmelCase__ ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Union[str, Any] ): __lowercase = 2 # number of steps of autoregressive prediction we will perform __lowercase = 1_0 # defined by the RL environment, may be normalized __lowercase = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) __lowercase = model.to(UpperCAmelCase__ ) __lowercase = model.config torch.manual_seed(0 ) __lowercase = torch.randn(1, 1, config.state_dim ).to(device=UpperCAmelCase__, dtype=torch.floataa ) # env.reset() __lowercase = torch.tensor( [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]], device=UpperCAmelCase__ ) __lowercase = torch.tensor(UpperCAmelCase__, device=UpperCAmelCase__, dtype=torch.floataa ).reshape(1, 1, 1 ) __lowercase = state __lowercase = torch.zeros(1, 0, config.act_dim, device=UpperCAmelCase__, dtype=torch.floataa ) __lowercase = torch.zeros(1, 0, device=UpperCAmelCase__, dtype=torch.floataa ) __lowercase = torch.tensor(0, device=UpperCAmelCase__, dtype=torch.long ).reshape(1, 1 ) for step in range(UpperCAmelCase__ ): __lowercase = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=UpperCAmelCase__ )], dim=1 ) __lowercase = torch.cat([rewards, torch.zeros(1, 1, device=UpperCAmelCase__ )], dim=1 ) __lowercase = torch.ones(1, states.shape[1] ).to(dtype=torch.long, device=states.device ) with torch.no_grad(): __lowercase ,__lowercase ,__lowercase = model( states=UpperCAmelCase__, actions=UpperCAmelCase__, rewards=UpperCAmelCase__, returns_to_go=UpperCAmelCase__, timesteps=UpperCAmelCase__, attention_mask=UpperCAmelCase__, return_dict=UpperCAmelCase__, ) 
self.assertEqual(action_pred.shape, actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1E-4 ) ) __lowercase ,__lowercase ,__lowercase ,__lowercase = ( # env.step(action) torch.randn(1, 1, config.state_dim ).to(device=UpperCAmelCase__, dtype=torch.floataa ), 1.0, False, {}, ) __lowercase = action_pred[0, -1] __lowercase = torch.cat([states, state], dim=1 ) __lowercase = returns_to_go[0, -1] - reward __lowercase = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1 )], dim=1 ) __lowercase = torch.cat( [timesteps, torch.ones((1, 1), device=UpperCAmelCase__, dtype=torch.long ) * (step + 1)], dim=1 )
17
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A : List[str] =logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _A : Tuple =[] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', 
'''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: lowerCamelCase__ : List[Any] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Any = val def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict: lowerCamelCase__ : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowerCamelCase__ : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) lowerCamelCase__ : Optional[int] = value else: lowerCamelCase__ : Any = value return new_state_dict def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Dict: lowerCamelCase__ : Optional[int] = """""" if is_panoptic: lowerCamelCase__ : Dict = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowerCamelCase__ : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ : int = in_proj_weight[:256, :] lowerCamelCase__ : Any = in_proj_bias[:256] lowerCamelCase__ : str = in_proj_weight[256:512, :] lowerCamelCase__ : Optional[int] = in_proj_bias[256:512] lowerCamelCase__ : Dict = in_proj_weight[-256:, :] lowerCamelCase__ : str = in_proj_bias[-256:] def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ : Optional[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: lowerCamelCase__ : Optional[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: lowerCamelCase__ : Any = """resnet101""" if "dc5" in model_name: lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : int = """panoptic""" in model_name if is_panoptic: lowerCamelCase__ : List[str] = 250 else: lowerCamelCase__ : int = 
91 lowerCamelCase__ : int = """huggingface/label-files""" lowerCamelCase__ : List[str] = """coco-detection-id2label.json""" lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : str = idalabel lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} # load image processor lowerCamelCase__ : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection""" lowerCamelCase__ : int = ConditionalDetrImageProcessor(format=UpperCamelCase ) # prepare image lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) lowerCamelCase__ : Optional[Any] = encoding["""pixel_values"""] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub lowerCamelCase__ : List[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase , pretrained=UpperCamelCase ).eval() lowerCamelCase__ : Dict = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: lowerCamelCase__ : Optional[Any] = """conditional_detr.""" + src rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Dict = rename_backbone_keys(UpperCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowerCamelCase__ : Dict = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): lowerCamelCase__ : int = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Any = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: lowerCamelCase__ : int = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Tuple = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): lowerCamelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Dict = val # finally, create HuggingFace model and load state dict lowerCamelCase__ : Tuple = ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() model.push_to_hub(repo_id=UpperCamelCase , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion lowerCamelCase__ : Optional[Any] = conditional_detr(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) 
model.save_pretrained(UpperCamelCase ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : List[Any] =argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _A : Optional[Any] =parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
41
0
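A minimal shape check mirroring the DecisionTransformer tester above; the tiny config values and sequence length are arbitrary, chosen only to keep the forward pass cheap.

import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=64)
model = DecisionTransformerModel(config).eval()

batch, seq = 2, 5
states = torch.randn(batch, seq, config.state_dim)
actions = torch.randn(batch, seq, config.act_dim)
rewards = torch.randn(batch, seq, 1)
returns_to_go = torch.randn(batch, seq, 1)
timesteps = torch.arange(seq).unsqueeze(0).repeat(batch, 1)
attention_mask = torch.ones(batch, seq, dtype=torch.long)

with torch.no_grad():
    out = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
# predictions mirror the input shapes, as the tester asserts
print(out.state_preds.shape, out.action_preds.shape, out.return_preds.shape)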
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) WordPiece tokenizer for SqueezeBert."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
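# Illustrative usage sketch for the class above (an editorial addition, not part
# of the original file). It assumes network access to the
# "squeezebert/squeezebert-uncased" checkpoint on the Hugging Face Hub; any
# BERT-style checkpoint with a vocab.txt would behave the same way.
from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("Hello world", "A second segment")
# token_type_ids come from create_token_type_ids_from_sequences above:
# 0 for [CLS] + segment A + [SEP], then 1 for segment B + [SEP].
print(encoded["input_ids"])
print(encoded["token_type_ids"])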
360
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
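# A minimal interactive sketch of the behaviour the tests above assert (an
# editorial addition, not part of the original test file). Scores and tokens
# depend on the checkpoint; "distilroberta-base" is just a convenient public
# model, and nothing here is asserted.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
print(unmasker("The largest city in France is <mask>"))
# `targets` restricts scoring to the given candidate tokens, mirroring run_test_targets:
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"]))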
78
0
"""simple docstring""" import os from pathlib import Path def _snake_case ( ): from torch.utils.cpp_extension import load A__ = Path(UpperCAmelCase_ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" A__ = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , UpperCAmelCase_ , with_cuda=UpperCAmelCase_ , extra_include_paths=[str(UpperCAmelCase_ )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
335
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_ : Optional[int] = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ : Any = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
335
1
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the token lengths of the train/val sets to their `len_file`s for dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # count non-pad tokens per example
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
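# Hypothetical invocation of the script above (an editorial addition). It
# assumes `data_dir` follows the train.source/train.target + val.source/val.target
# layout that Seq2SeqDataset in the accompanying utils module expects; the
# checkpoint name and lengths are placeholders. Via fire, the CLI equivalent
# would be: python save_len_file.py <tokenizer_name> <data_dir>.
save_len_file(
    "facebook/bart-base",
    "data/wmt_en_ro",
    max_source_length=1024,
    max_target_length=56,
    consider_target=True,
)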
47
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
47
1