Dataset schema:

  code                     string  (87 to 55.2k chars)
  code_codestyle           int64   (0 to 349)
  style_context            string  (135 to 49.1k chars)
  style_context_codestyle  int64   (0 to 349)
  label                    int64   (0 to 1)
code:

'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_SCREAMING_SNAKE_CASE : List[Any] = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : Dict = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    _SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 85
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 289
label: 0
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class lowercase ( lowerCAmelCase_ ): _SCREAMING_SNAKE_CASE = 'gpt_neo' _SCREAMING_SNAKE_CASE = ['past_key_values'] _SCREAMING_SNAKE_CASE = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , lowercase=50_257 , lowercase=2_048 , lowercase=2_048 , lowercase=24 , lowercase=[[["global", "local"], 12]] , lowercase=16 , lowercase=None , lowercase=256 , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=50_256 , lowercase=50_256 , **lowercase , ) -> List[Any]: lowerCAmelCase = vocab_size lowerCAmelCase = max_position_embeddings lowerCAmelCase = hidden_size lowerCAmelCase = num_layers lowerCAmelCase = num_heads lowerCAmelCase = intermediate_size lowerCAmelCase = window_size lowerCAmelCase = activation_function lowerCAmelCase = resid_dropout lowerCAmelCase = embed_dropout lowerCAmelCase = attention_dropout lowerCAmelCase = classifier_dropout lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_range lowerCAmelCase = use_cache lowerCAmelCase = bos_token_id lowerCAmelCase = eos_token_id lowerCAmelCase = attention_types lowerCAmelCase = self.expand_attention_types_params(__lowerCAmelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.attention_layers)` == `config.num_layers` """ f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' f'`config.num_layers = {self.num_layers}`. ' """`config.attention_layers` is prepared using `config.attention_types`. 
""" """Please verify the value of `config.attention_types` argument.""" ) super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @staticmethod def _snake_case ( lowercase ) -> List[str]: lowerCAmelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' import torch lowerCAmelCase = input.size() lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) lowerCAmelCase = shape[dimension] lowerCAmelCase = torch.arange(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.div(sizedim - size , SCREAMING_SNAKE_CASE , rounding_mode="""floor""" ) + 1 lowerCAmelCase = torch.arange(SCREAMING_SNAKE_CASE ) + low_indices[:min_length][:, None] lowerCAmelCase = [slice(SCREAMING_SNAKE_CASE )] * rank lowerCAmelCase = indices lowerCAmelCase = input[s] lowerCAmelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' import torch lowerCAmelCase = torch.arange(1 , SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.remainder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowerCAmelCase = remainders == 0 lowerCAmelCase = candidates[divisor_indices] lowerCAmelCase = torch.max(SCREAMING_SNAKE_CASE ) return largest_divisor, torch.div(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rounding_mode="""floor""" ) class lowercase ( lowerCAmelCase_ ): @property def _snake_case ( self ) -> List[Any]: lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCAmelCase , direction="""inputs""" ) lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCAmelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def _snake_case ( self ) -> Any: return self._config.num_heads def _snake_case ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Any: lowerCAmelCase = super(__lowerCAmelCase , self ).generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCAmelCase , lowerCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCAmelCase = seqlen + 2 lowerCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase = [ (torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCAmelCase = common_inputs["""attention_mask"""] if self.use_past: lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype lowerCAmelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ) 
-> Optional[int]: return 13
code_codestyle: 46
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
style_context_codestyle: 289
label: 0
code:

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


A__ = logging.get_logger(__name__)


class a ( lowerCAmelCase_, lowerCAmelCase_ ):
    __lowerCAmelCase : Union[str, Any] = 'maskformer-swin'

    __lowerCAmelCase : int = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self: Dict,
        __lowercase: Tuple = 2_2_4,
        __lowercase: Any = 4,
        __lowercase: List[str] = 3,
        __lowercase: str = 9_6,
        __lowercase: List[str] = [2, 2, 6, 2],
        __lowercase: Tuple = [3, 6, 1_2, 2_4],
        __lowercase: Any = 7,
        __lowercase: Optional[Any] = 4.0,
        __lowercase: Tuple = True,
        __lowercase: Optional[int] = 0.0,
        __lowercase: Any = 0.0,
        __lowercase: Union[str, Any] = 0.1,
        __lowercase: Optional[Any] = "gelu",
        __lowercase: Optional[int] = False,
        __lowercase: Dict = 0.02,
        __lowercase: Tuple = 1e-5,
        __lowercase: Tuple = None,
        __lowercase: Union[str, Any] = None,
        **__lowercase: Optional[int],
    ):
        super().__init__(**__lowerCAmelCase)

        snake_case__ : Optional[Any] = image_size
        snake_case__ : Optional[int] = patch_size
        snake_case__ : str = num_channels
        snake_case__ : int = embed_dim
        snake_case__ : Any = depths
        snake_case__ : List[Any] = len(__lowerCAmelCase)
        snake_case__ : int = num_heads
        snake_case__ : Optional[int] = window_size
        snake_case__ : List[str] = mlp_ratio
        snake_case__ : List[str] = qkv_bias
        snake_case__ : List[Any] = hidden_dropout_prob
        snake_case__ : int = attention_probs_dropout_prob
        snake_case__ : Union[str, Any] = drop_path_rate
        snake_case__ : Any = hidden_act
        snake_case__ : Any = use_absolute_embeddings
        snake_case__ : Dict = layer_norm_eps
        snake_case__ : Union[str, Any] = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        snake_case__ : int = int(embed_dim * 2 ** (len(__lowerCAmelCase) - 1))
        snake_case__ : int = ['''stem'''] + [F"""stage{idx}""" for idx in range(1, len(__lowerCAmelCase) + 1)]
        snake_case__, snake_case__ : List[str] = get_aligned_output_features_output_indices(
            out_features=__lowerCAmelCase, out_indices=__lowerCAmelCase, stage_names=self.stage_names
        )
code_codestyle: 230
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 289
label: 0
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase ( lowerCAmelCase_): __a : Optional[int] = 'new-model' if is_tf_available(): class _UpperCAmelCase ( lowerCAmelCase_): __a : Any = NewModelConfig @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def __snake_case ( self ) -> Any: '''simple docstring''' _UpperCAmelCase : Dict = """bert-base-cased""" _UpperCAmelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : int = TFAutoModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : List[str] = """bert-base-cased""" _UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : List[str] = TFAutoModelForPreTraining.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> Optional[int]: '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Tuple = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> Union[str, Any]: '''simple 
docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Tuple = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> Dict: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> str: '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> List[str]: '''simple docstring''' for model_name in ["bert-base-uncased"]: _UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def __snake_case ( self ) -> Optional[int]: '''simple docstring''' for model_name in ["bert-base-uncased"]: _UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow @require_tensorflow_probability def __snake_case ( self ) -> Optional[int]: '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : str = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained( __lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) 
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_44_10 ) def __snake_case ( self ) -> Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_44_10 ) def __snake_case ( self ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Dict = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Dict = copy.deepcopy(model.config ) _UpperCAmelCase : Dict = ["""FunnelBaseModel"""] _UpperCAmelCase : Any = TFAutoModel.from_config(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def __snake_case ( self ) -> Tuple: '''simple docstring''' try: AutoConfig.register("""new-model""" , __lowerCAmelCase ) _UpperCAmelCase : List[Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__lowerCAmelCase ): auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCAmelCase : Union[str, Any] = BertModelTester(self ).get_config() _UpperCAmelCase : int = NewModelConfig(**tiny_config.to_dict() ) _UpperCAmelCase : Dict = auto_class.from_config(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase : int = auto_class.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def __snake_case ( self ) -> Any: '''simple docstring''' with self.assertRaisesRegex( __lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ): _UpperCAmelCase : int = TFAutoModel.from_pretrained("""bert-base""" ) def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( __lowerCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _UpperCAmelCase : Tuple = 
TFAutoModel.from_pretrained(__lowerCAmelCase , revision="""aaaaaa""" ) def __snake_case ( self ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( __lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ): _UpperCAmelCase : Any = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" ) def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex(__lowerCAmelCase , """Use `from_pt=True` to load this model""" ): _UpperCAmelCase : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" ) def __snake_case ( self ) -> Dict: '''simple docstring''' _UpperCAmelCase : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: _UpperCAmelCase : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _UpperCAmelCase : int = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" ) with RequestCounter() as counter: _UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
code_codestyle: 246
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
style_context_codestyle: 289
label: 0
code:

'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml


__A = logging.get_logger(__name__)


def _A(lowercase__, lowercase__):
    def run_func(lowercase__):
        @wraps(lowercase__)
        def run_in_eager_mode(*lowercase__, **lowercase__):
            return func(*lowercase__, **lowercase__)

        @wraps(lowercase__)
        @tf.function(experimental_compile=lowercase__)
        def run_in_graph_mode(*lowercase__, **lowercase__):
            return func(*lowercase__, **lowercase__)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."""
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def _A(lowercase__, lowercase__, lowercase__):
    lowercase__ = random.Random()
    lowercase__ = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(lowercase__, shape=(batch_size, sequence_length), dtype=tf.intaa)


class A(lowerCAmelCase_):
    lowerCamelCase : TensorFlowBenchmarkArguments
    lowerCamelCase : PretrainedConfig
    lowerCamelCase : str = "TensorFlow"

    @property
    def A__(self) -> str:
        '''simple docstring'''
        return tf.__version__

    def A__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> Any:
        '''simple docstring'''
        lowercase__ = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        lowercase__ = self._prepare_inference_func(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
        return self._measure_speed(_inference)

    def A__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> Any:
        '''simple docstring'''
        lowercase__ = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        lowercase__ = self._prepare_train_func(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
        return self._measure_speed(_train)

    def A__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> Tuple:
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], __lowerCAmelCase)
        lowercase__ = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        lowercase__ = self._prepare_inference_func(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
        return self._measure_memory(_inference)

    def A__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], __lowerCAmelCase)
        lowercase__ = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        lowercase__ = self._prepare_train_func(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
        return self._measure_memory(_train)

    def A__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> Union[str, Any]:
        '''simple docstring'''
        lowercase__ = self.config_dict[model_name]

        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""")

        lowercase__ = (
            hasattr(__lowerCAmelCase, """architectures""")
            and isinstance(config.architectures, __lowerCAmelCase)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                lowercase__ = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                lowercase__ = __import__("""transformers""", fromlist=[model_class])
                lowercase__ = getattr(__lowerCAmelCase, __lowerCAmelCase)
                lowercase__ = model_cls(__lowerCAmelCase)
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`."""
                )
        else:
            lowercase__ = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase)

        # encoder-decoder has vocab size saved differently
        lowercase__ = config.vocab_size if hasattr(__lowerCAmelCase, """vocab_size""") else config.encoder.vocab_size
        lowercase__ = random_input_ids(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(__lowerCAmelCase, decoder_input_ids=__lowerCAmelCase, training=__lowerCAmelCase)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(__lowerCAmelCase, training=__lowerCAmelCase)

        lowercase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def A__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> str:
        '''simple docstring'''
        lowercase__ = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""")

        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""")

        lowercase__ = (
            hasattr(__lowerCAmelCase, """architectures""")
            and isinstance(config.architectures, __lowerCAmelCase)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                lowercase__ = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                lowercase__ = __import__("""transformers""", fromlist=[model_class])
                lowercase__ = getattr(__lowerCAmelCase, __lowerCAmelCase)
                lowercase__ = model_cls(__lowerCAmelCase)
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`."""
                )
        else:
            lowercase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase)

        # encoder-decoder has vocab size saved differently
        lowercase__ = config.vocab_size if hasattr(__lowerCAmelCase, """vocab_size""") else config.encoder.vocab_size
        lowercase__ = random_input_ids(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            lowercase__ = model(__lowerCAmelCase, decoder_input_ids=__lowerCAmelCase, labels=__lowerCAmelCase, training=__lowerCAmelCase)[0]
            lowercase__ = tf.gradients(__lowerCAmelCase, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            lowercase__ = model(__lowerCAmelCase, labels=__lowerCAmelCase, training=__lowerCAmelCase)[0]
            lowercase__ = tf.gradients(__lowerCAmelCase, model.trainable_variables)
            return gradients

        lowercase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def A__(self, lowerCamelCase__) -> Any:
        '''simple docstring'''
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""")
                    timeit.repeat(__lowerCAmelCase, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                lowercase__ = timeit.repeat(
                    __lowerCAmelCase,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(__lowerCAmelCase) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''')

    def A__(self, lowerCamelCase__) -> Union[str, Any]:
        '''simple docstring'''
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used."""
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line."""
                        )
                    lowercase__ = start_memory_tracing("""transformers""")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`"""
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU."""
                        )
                        lowercase__ = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU."""
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        lowercase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        lowercase__ = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase)
                        lowercase__ = meminfo.used
                        lowercase__ = Memory(__lowerCAmelCase)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow."""
                        )
                        lowercase__ = None
                    else:
                        lowercase__ = measure_peak_memory_cpu(__lowerCAmelCase)
                        lowercase__ = Memory(__lowerCAmelCase) if isinstance(__lowerCAmelCase, __lowerCAmelCase) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    lowercase__ = stop_memory_tracing(__lowerCAmelCase)
                    if memory is None:
                        lowercase__ = summary.total
                else:
                    lowercase__ = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''')
                return "N/A", None
code_codestyle: 164
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
style_context_codestyle: 289
label: 0
"""simple docstring""" def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): def update_area_of_max_square(lowerCamelCase , lowerCamelCase ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 UpperCAmelCase__ = update_area_of_max_square(lowerCamelCase , col + 1 ) UpperCAmelCase__ = update_area_of_max_square(row + 1 , col + 1 ) UpperCAmelCase__ = update_area_of_max_square(row + 1 , lowerCamelCase ) if mat[row][col]: UpperCAmelCase__ = 1 + min([right, diagonal, down] ) UpperCAmelCase__ = max(largest_square_area[0] , lowerCamelCase ) return sub_problem_sol else: return 0 UpperCAmelCase__ = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): def update_area_of_max_square_using_dp_array( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] UpperCAmelCase__ = update_area_of_max_square_using_dp_array(lowerCamelCase , col + 1 , lowerCamelCase ) UpperCAmelCase__ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowerCamelCase ) UpperCAmelCase__ = update_area_of_max_square_using_dp_array(row + 1 , lowerCamelCase , lowerCamelCase ) if mat[row][col]: UpperCAmelCase__ = 1 + min([right, diagonal, down] ) UpperCAmelCase__ = max(largest_square_area[0] , lowerCamelCase ) UpperCAmelCase__ = sub_problem_sol return sub_problem_sol else: return 0 UpperCAmelCase__ = [0] UpperCAmelCase__ = [[-1] * cols for _ in range(lowerCamelCase )] update_area_of_max_square_using_dp_array(0 , 0 , lowerCamelCase ) return largest_square_area[0] def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = [[0] * (cols + 1) for _ in range(rows + 1 )] UpperCAmelCase__ = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): UpperCAmelCase__ = dp_array[row][col + 1] UpperCAmelCase__ = dp_array[row + 1][col + 1] UpperCAmelCase__ = dp_array[row + 1][col] if mat[row][col] == 1: UpperCAmelCase__ = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = max(dp_array[row][col] , lowerCamelCase ) else: UpperCAmelCase__ = 0 return largest_square_area def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = [0] * (cols + 1) UpperCAmelCase__ = [0] * (cols + 1) UpperCAmelCase__ = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): UpperCAmelCase__ = current_row[col + 1] UpperCAmelCase__ = next_row[col + 1] UpperCAmelCase__ = next_row[col] if mat[row][col] == 1: UpperCAmelCase__ = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = max(current_row[col] , lowerCamelCase ) else: UpperCAmelCase__ = 0 UpperCAmelCase__ = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
98
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
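# Standalone version of the integration check above (checkpoint, input ids and
# expected shape are taken from the slow test):
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)  # torch.Size([1, 11, 768])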
289
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
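# Hedged usage sketch for the exported pipeline; the checkpoint name is an
# assumption, not something this module states:
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
image = pipe("a photo of a red fox in the snow").images[0]
image.save("fox.png")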
206
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __a = logging.get_logger(__name__) class lowerCamelCase ( lowerCAmelCase_ ): '''simple docstring''' _A : str = ['input_features', 'attention_mask'] def __init__( self: Tuple , snake_case: Any=80 , snake_case: Union[str, Any]=16_000 , snake_case: Optional[int]=0.0 , snake_case: Union[str, Any]=10 , snake_case: Optional[int]=25 , snake_case: Tuple="hamming_window" , snake_case: Tuple=32_768.0 , snake_case: List[Any]=0.9_7 , snake_case: List[Any]=1.0 , snake_case: List[Any]=True , snake_case: int=True , snake_case: Optional[int]=False , **snake_case: List[str] , ) -> List[str]: super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase ) snake_case_ :List[str] = feature_size snake_case_ :int = sampling_rate snake_case_ :List[str] = padding_value snake_case_ :Optional[int] = hop_length snake_case_ :List[Any] = win_length snake_case_ :Any = frame_signal_scale snake_case_ :Union[str, Any] = preemphasis_coeff snake_case_ :Dict = mel_floor snake_case_ :List[str] = normalize_means snake_case_ :Tuple = normalize_vars snake_case_ :Union[str, Any] = win_function snake_case_ :Optional[Any] = return_attention_mask snake_case_ :Tuple = win_length * sampling_rate // 1_000 snake_case_ :int = hop_length * sampling_rate // 1_000 snake_case_ :List[Any] = optimal_fft_length(self.sample_size ) snake_case_ :Union[str, Any] = (self.n_fft // 2) + 1 def lowerCAmelCase_ ( self: Optional[int] , snake_case: np.array ) -> Tuple: if self.win_function == "hamming_window": snake_case_ :Optional[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase ) else: snake_case_ :List[Any] = window_function(window_length=self.sample_size , name=self.win_function ) snake_case_ :Tuple = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) snake_case_ :Dict = spectrogram( one_waveform * self.frame_signal_scale , window=__lowerCAmelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__lowerCAmelCase , preemphasis=self.preemphasis_coeff , mel_filters=__lowerCAmelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def lowerCAmelCase_ ( self: List[Any] , snake_case: List[Any] , snake_case: int , snake_case: int ) -> str: # make sure we normalize float32 arrays if self.normalize_means: snake_case_ :Union[str, Any] = x[:input_length].mean(axis=0 ) snake_case_ :int = np.subtract(__lowerCAmelCase , __lowerCAmelCase ) if self.normalize_vars: snake_case_ :Optional[int] = x[:input_length].std(axis=0 ) snake_case_ :str = np.divide(__lowerCAmelCase , __lowerCAmelCase ) if input_length < x.shape[0]: snake_case_ :str = padding_value # make sure array is in float32 snake_case_ :List[Any] = x.astype(np.floataa ) return x def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[np.ndarray] , snake_case: Optional[np.ndarray] = None ) -> List[Any]: snake_case_ :Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] 
for x in input_features] return [self._normalize_one(__lowerCAmelCase , __lowerCAmelCase , self.padding_value ) for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )] def __call__( self: Dict , snake_case: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case: Union[bool, str, PaddingStrategy] = False , snake_case: Optional[int] = None , snake_case: bool = False , snake_case: Optional[int] = None , snake_case: Optional[bool] = None , snake_case: Optional[Union[str, TensorType]] = None , snake_case: Optional[int] = None , **snake_case: Optional[int] , ) -> Union[str, Any]: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) snake_case_ :Optional[int] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) snake_case_ :Tuple = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ :Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): snake_case_ :int = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ :Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ :Optional[Any] = [raw_speech] # extract fbank features snake_case_ :Optional[Any] = [self._extract_mfsc_features(__lowerCAmelCase ) for one_waveform in raw_speech] # convert into correct format for padding snake_case_ :Any = BatchFeature({"""input_features""": features} ) snake_case_ :List[str] = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) # make sure list is in array format snake_case_ :Optional[Any] = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __lowerCAmelCase ): snake_case_ :Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features] snake_case_ :str = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: snake_case_ :Tuple = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: snake_case_ :int = ( np.array(__lowerCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) snake_case_ :Tuple = self.normalize( padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase ) if return_tensors is not None: snake_case_ :List[str] = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs
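# The frame geometry above in concrete numbers (defaults: 25 ms windows, 10 ms
# hops at 16 kHz), a quick sanity check:
sampling_rate, win_length, hop_length = 16_000, 25, 10
sample_size = win_length * sampling_rate // 1_000    # 400 samples per frame
sample_stride = hop_length * sampling_rate // 1_000  # 160 samples between frames
print(sample_size, sample_stride)  # 400 160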
66
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
0
from string import ascii_uppercase

LETTER_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_LETTER = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter back by its key letter, modulo 26."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (LETTER_TO_INDEX[letter] - LETTER_TO_INDEX[key_new[i]]) % 26
            i += 1
            encrypted += INDEX_TO_LETTER[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt: shift each letter forward by its key letter, modulo 26."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (LETTER_TO_INDEX[letter] + LETTER_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_LETTER[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
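# Round-trip sanity check (uppercase letters and spaces only, as the lookup
# tables above assume):
key_stream = generate_key("HELLO WORLD", "KEY")
encrypted = cipher_text("HELLO WORLD", key_stream)
assert original_text(encrypted, key_stream) == "HELLO WORLD"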
97
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
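# Standalone version of the integration check above (checkpoint, input ids and
# expected shape are taken from the slow test):
import torch
from transformers import MPNetModel

model = MPNetModel.from_pretrained("microsoft/mpnet-base")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 11, 768])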
289
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A_ ( lowerCAmelCase_ ): _lowercase : Optional[int] = (DPMSolverSinglestepScheduler,) _lowercase : Optional[Any] = (('num_inference_steps', 2_5),) def UpperCAmelCase ( self : Any , **UpperCAmelCase : Any ) -> List[Any]: __lowerCAmelCase: Optional[int] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**__lowerCAmelCase ) return config def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Tuple=0 , **UpperCAmelCase : str ) -> int: __lowerCAmelCase: List[str] = dict(self.forward_default_kwargs ) __lowerCAmelCase: Any = kwargs.pop('num_inference_steps' , __lowerCAmelCase ) __lowerCAmelCase: int = self.dummy_sample __lowerCAmelCase: List[Any] = 0.1 * sample __lowerCAmelCase: Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase: Optional[Any] = self.get_scheduler_config(**__lowerCAmelCase ) __lowerCAmelCase: str = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals __lowerCAmelCase: Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) __lowerCAmelCase: Tuple = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals __lowerCAmelCase: int = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase , __lowerCAmelCase: int = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): __lowerCAmelCase: int = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample __lowerCAmelCase: str = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self : Any ) -> str: pass def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Tuple=0 , **UpperCAmelCase : str ) -> Optional[Any]: __lowerCAmelCase: int = dict(self.forward_default_kwargs ) __lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , __lowerCAmelCase ) __lowerCAmelCase: Any = self.dummy_sample __lowerCAmelCase: int = 0.1 * sample __lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase: str = self.get_scheduler_config() __lowerCAmelCase: Optional[int] = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) __lowerCAmelCase: Any = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) __lowerCAmelCase: Optional[int] = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after 
setting timesteps) __lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase: str = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample __lowerCAmelCase: Union[str, Any] = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self : Dict , UpperCAmelCase : List[str]=None , **UpperCAmelCase : Optional[Any] ) -> Any: if scheduler is None: __lowerCAmelCase: Any = self.scheduler_classes[0] __lowerCAmelCase: List[Any] = self.get_scheduler_config(**__lowerCAmelCase ) __lowerCAmelCase: Optional[Any] = scheduler_class(**__lowerCAmelCase ) __lowerCAmelCase: Union[str, Any] = self.scheduler_classes[0] __lowerCAmelCase: int = self.get_scheduler_config(**__lowerCAmelCase ) __lowerCAmelCase: List[Any] = scheduler_class(**__lowerCAmelCase ) __lowerCAmelCase: List[str] = 1_0 __lowerCAmelCase: Tuple = self.dummy_model() __lowerCAmelCase: List[str] = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase: int = model(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase: Optional[int] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: __lowerCAmelCase: Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase: Union[str, Any] = 5_0 __lowerCAmelCase: Optional[Any] = self.dummy_model() __lowerCAmelCase: Tuple = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): __lowerCAmelCase: List[Any] = model(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase: List[Any] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample __lowerCAmelCase: List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def UpperCAmelCase ( self : Dict ) -> Optional[int]: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults __lowerCAmelCase: Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase: List[Any] = self.full_loop(scheduler=__lowerCAmelCase ) __lowerCAmelCase: Any = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 __lowerCAmelCase: Dict = DEISMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: int = DPMSolverMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: int = UniPCMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __lowerCAmelCase: List[Any] = self.full_loop(scheduler=__lowerCAmelCase ) __lowerCAmelCase: Tuple = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: 
self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , algorithm_type='dpmsolver++' , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def UpperCAmelCase ( self : Any ) -> Any: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , ) __lowerCAmelCase: List[Any] = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def UpperCAmelCase ( self : Tuple ) -> int: self.check_over_configs(lower_order_final=__lowerCAmelCase ) self.check_over_configs(lower_order_final=__lowerCAmelCase ) def UpperCAmelCase ( self : str ) -> Any: self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def UpperCAmelCase ( self : int ) -> Optional[int]: self.check_over_configs(variance_type=__lowerCAmelCase ) self.check_over_configs(variance_type='learned_range' ) def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def UpperCAmelCase ( self : List[str] ) -> Any: __lowerCAmelCase: Optional[int] = self.full_loop() __lowerCAmelCase: List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def UpperCAmelCase ( self : Tuple ) -> int: __lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=__lowerCAmelCase ) __lowerCAmelCase: List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def UpperCAmelCase ( self : int ) -> Optional[int]: __lowerCAmelCase: List[Any] = self.full_loop(prediction_type='v_prediction' ) __lowerCAmelCase: str = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def UpperCAmelCase ( self : Dict ) -> Any: __lowerCAmelCase: str = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=__lowerCAmelCase ) __lowerCAmelCase: Optional[int] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def UpperCAmelCase ( self : Dict ) -> Optional[Any]: __lowerCAmelCase: Tuple = self.scheduler_classes[0] __lowerCAmelCase: str = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) __lowerCAmelCase: Any = scheduler_class(**__lowerCAmelCase ) __lowerCAmelCase: Union[str, Any] = 1_0 __lowerCAmelCase: List[Any] = self.dummy_model() __lowerCAmelCase: List[str] = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase: List[str] = model(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase: int = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
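# Hedged sketch of using the scheduler under test outside the harness; the
# pipeline checkpoint is an assumption, and 25 steps mirrors the default above:
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]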
322
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
0
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original weights into the VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
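# Programmatic invocation of the converter above; the local checkpoint path is
# a hypothetical placeholder (its file name must be one of ACCEPTABLE_CHECKPOINTS):
convert_visual_bert_checkpoint("checkpoints/vqa_fine_tuned.th", "visual_bert_vqa")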
196
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
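# A minimal sketch of the utility the `# New Code #` sections above rely on
# (illustrative only: the toy `train` function and the simulated OOM error
# below are not part of the script).
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Pretend that any batch size above 16 exhausts GPU memory.
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")
    return batch_size


# Each simulated OOM halves the batch size: 128 -> 64 -> 32 -> 16.
assert train() == 16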
289
0
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def UpperCamelCase_( snake_case : int=None , snake_case : Any=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=snake_case ) @dataclass class _snake_case : lowerCAmelCase_ : str = field( metadata={"help": "The csv file to plot."} , ) lowerCAmelCase_ : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , ) lowerCAmelCase_ : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , ) lowerCAmelCase_ : bool = field( default=lowerCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , ) lowerCAmelCase_ : bool = field( default=lowerCAmelCase_ , metadata={ "help": "Whether the csv file has training results or inference results. Defaults to inference results." } , ) lowerCAmelCase_ : Optional[str] = field( default=lowerCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , ) lowerCAmelCase_ : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} ) def UpperCamelCase_( snake_case : Optional[int] ): '''simple docstring''' try: int(snake_case ) return True except ValueError: return False def UpperCamelCase_( snake_case : int ): '''simple docstring''' try: float(snake_case ) return True except ValueError: return False class _snake_case : def __init__( self , a__ ) -> int: '''simple docstring''' snake_case_ = args snake_case_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="" ) as csv_file: snake_case_ = csv.DictReader(__lowerCAmelCase ) for row in reader: snake_case_ = row["model"] self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) ) self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) ) if can_convert_to_int(row["result"] ): # value is not None snake_case_ = int(row["result"] ) elif can_convert_to_float(row["result"] ): # value is not None snake_case_ = float(row["result"] ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ , snake_case_ = plt.subplots() snake_case_ = "Time usage" if self.args.is_time else "Memory usage" snake_case_ = title_str + " for training" if self.args.is_train else title_str + " for inference" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("log" ) ax.set_yscale("log" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): snake_case_ = sorted(set(self.result_dict[model_name]["bsz"] ) ) snake_case_ = sorted(set(self.result_dict[model_name]["seq_len"] ) ) snake_case_ = self.result_dict[model_name]["result"] ((snake_case_) , (snake_case_)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) snake_case_ = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: snake_case_ = np.asarray( [results[(x, 
inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: snake_case_ = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((snake_case_) , (snake_case_)) = ( ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz") ) snake_case_ = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , "--" ) title_str += F' {label_model_name} vs.' snake_case_ = title_str[:-4] snake_case_ = "Time in s" if self.args.is_time else "Memory in MB" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def UpperCamelCase_( ): '''simple docstring''' snake_case_ = HfArgumentParser(snake_case ) snake_case_ = parser.parse_args_into_dataclasses()[0] snake_case_ = Plot(args=snake_case ) plot.plot() if __name__ == "__main__": main()
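# Hypothetical usage of the plotting script above (the file name, model name,
# and timing numbers are made up for illustration, and saving the script as
# plot_csv_file.py is an assumption). The reader accesses four CSV columns:
# model, batch_size, sequence_length, result.
import csv

rows = [
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 32, "result": 0.006},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 0.018},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 512, "result": 0.088},
]
with open("time_results.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)

# Then, for a time plot on log-log axes saved to disk:
#   python plot_csv_file.py --csv_file time_results.csv --is_time --figure_png_file time_plot.png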
85
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
0
"""simple docstring""" from numpy import exp, pi, sqrt def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] = 0.0 , SCREAMING_SNAKE_CASE : Optional[Any] = 1.0 ): '''simple docstring''' return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
46
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
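# A standalone forward pass with the same tiny configuration the first test
# class above builds (a sketch mirroring its init_dict/dummy_input; the class
# is imported under its real diffusers name, UNet2DModel).
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(4, 3, 32, 32)
timestep = torch.tensor([10])
sample = model(noise, timestep).sample
assert sample.shape == noise.shape  # the UNet is shape-preserving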
289
0
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
230
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
289
0
"""simple docstring""" from __future__ import annotations def UpperCamelCase ( _lowerCAmelCase : int ) -> Union[str, Any]: create_state_space_tree(_lowerCAmelCase, [], 0, [0 for i in range(len(_lowerCAmelCase ) )] ) def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : Any, ) -> Any: if index == len(_lowerCAmelCase ): print(_lowerCAmelCase ) return for i in range(len(_lowerCAmelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) _UpperCAmelCase : Union[str, Any] = True create_state_space_tree(_lowerCAmelCase, _lowerCAmelCase, index + 1, _lowerCAmelCase ) current_sequence.pop() _UpperCAmelCase : List[Any] = False lowerCamelCase__ : Optional[int] = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCamelCase__ : Optional[int] = ['''A''', '''B''', '''C'''] generate_all_permutations(sequence_a)
246
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
0
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A ( lowerCAmelCase_ ): lowerCamelCase : Dict = (KDPMaDiscreteScheduler,) lowerCamelCase : Tuple = 10 def A__ ( self , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' lowercase__ = { """num_train_timesteps""": 1_100, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__lowerCAmelCase ) return config def A__ ( self ) -> Any: '''simple docstring''' for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A__ ( self ) -> Optional[Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase ) def A__ ( self ) -> List[str]: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__lowerCAmelCase ) def A__ ( self ) -> List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A__ ( self ) -> List[Any]: '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowercase__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase__ = sample.to(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ = model(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ = output.prev_sample lowercase__ = torch.sum(torch.abs(__lowerCAmelCase ) ) lowercase__ = torch.mean(torch.abs(__lowerCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2 assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2 assert abs(result_mean.item() - 0.00_02 ) < 1e-3 def A__ ( self ) -> List[str]: '''simple docstring''' if torch_device == "mps": return lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase__ = sample.to(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ = model(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ = output.prev_sample lowercase__ = torch.sum(torch.abs(__lowerCAmelCase ) ) lowercase__ = torch.mean(torch.abs(__lowerCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 def A__ ( self ) -> List[str]: '''simple docstring''' if torch_device == "mps": return lowercase__ = self.scheduler_classes[0] lowercase__ = 
self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCAmelCase ) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter.to(__lowerCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowercase__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ = model(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ = output.prev_sample lowercase__ = torch.sum(torch.abs(__lowerCAmelCase ) ) lowercase__ = torch.mean(torch.abs(__lowerCAmelCase ) ) if str(__lowerCAmelCase ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3
164
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
0
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def a_ ( lowerCamelCase=None , lowerCamelCase=None ): return field(default_factory=lambda: default , metadata=lowerCamelCase ) @dataclass class snake_case : """simple docstring""" snake_case__ = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) snake_case__ = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) snake_case__ = list_field( default=[8, 32, 1_28, 5_12] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) snake_case__ = field( default=lowerCAmelCase_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) snake_case__ = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) snake_case__ = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Use FP16 to accelerate inference."} ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark training of model"} ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Verbose memory tracing"} ) snake_case__ = field( default=lowerCAmelCase_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) snake_case__ = field( default=lowerCAmelCase_ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Trace memory line by line"} ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Save result to a CSV file"} ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Save all print statements in a log file"} ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Whether to print environment information"} ) snake_case__ = field( default=lowerCAmelCase_ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) snake_case__ = field( default=F'inference_time_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving time results to csv."} , ) snake_case__ = field( default=F'inference_memory_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) snake_case__ = field( default=F'train_time_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) snake_case__ = field( default=F'train_memory_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) snake_case__ = field( default=F'env_info_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving environment information."} , ) snake_case__ = field( default=F'log_{round(time() )}.csv' , metadata={"help": "Log filename used if print statements are saved in log."} , ) snake_case__ = field(default=3 , metadata={"help": "Times an experiment will be run."} ) snake_case__ = field( default=lowerCAmelCase_ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __lowerCAmelCase ( self : Tuple ): warnings.warn( f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' ' are deprecated in general and it is advised to use external Benchmarking libraries ' ' to benchmark Transformer models.' ,__lowerCAmelCase ,) def __lowerCAmelCase ( self : Union[str, Any] ): return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def __lowerCAmelCase ( self : Union[str, Any] ): if len(self.models ) <= 0: raise ValueError( 'Please make sure you provide at least one model name / model identifier, *e.g.* `--models' ' bert-base-cased` or `args.models = [\'bert-base-cased\'].' ) return self.models @property def __lowerCAmelCase ( self : Any ): if not self.multi_process: return False elif self.is_tpu: logger.info('Multiprocessing is currently not possible on TPU.' ) return False else: return True
98
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
0
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCamelCase :Tuple = logging.getLogger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if os.path.exists(lowerCamelCase__ ): if os.path.exists(os.path.join(lowerCamelCase__ , """config.json""" ) ) and os.path.isfile( os.path.join(lowerCamelCase__ , """config.json""" ) ): os.remove(os.path.join(lowerCamelCase__ , """config.json""" ) ) if os.path.exists(os.path.join(lowerCamelCase__ , """pytorch_model.bin""" ) ) and os.path.isfile( os.path.join(lowerCamelCase__ , """pytorch_model.bin""" ) ): os.remove(os.path.join(lowerCamelCase__ , """pytorch_model.bin""" ) ) else: os.makedirs(lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = 2 if unlogit: A_ : List[str] = torch.pow(lowerCamelCase__ , lowerCamelCase__ ) A_ : Any = p * torch.log(lowerCamelCase__ ) A_ : Tuple = 0 return -plogp.sum(dim=-1 ) def a ( lowerCamelCase__ ): '''simple docstring''' logger.info("""lv, h >\t""" + """\t""".join(f'{x + 1}' for x in range(len(lowerCamelCase__ ) ) ) ) for row in range(len(lowerCamelCase__ ) ): if tensor.dtype != torch.long: logger.info(f'layer {row + 1}:\t' + """\t""".join(f'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(f'layer {row + 1}:\t' + """\t""".join(f'{x:d}' for x in tensor[row].cpu().data ) ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=False ): '''simple docstring''' A_, A_ : Union[str, Any] = model.config.num_hidden_layers, model.config.num_attention_heads A_ : Tuple = torch.zeros(lowerCamelCase__ , lowerCamelCase__ ).to(args.device ) A_ : Union[str, Any] = torch.zeros(lowerCamelCase__ , lowerCamelCase__ ).to(args.device ) if head_mask is None: A_ : List[str] = torch.ones(lowerCamelCase__ , lowerCamelCase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCamelCase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: A_ : Tuple = None A_ : Optional[Any] = 0.0 A_ : int = 0.0 for step, inputs in enumerate(tqdm(lowerCamelCase__ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ): A_ : str = tuple(t.to(args.device ) for t in inputs ) ((A_ ), ) : str = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) A_ : Optional[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ , head_mask=lowerCamelCase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) A_, A_, A_ : Any = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCamelCase__ ): A_ : List[str] = entropy(attn.detach() , lowerCamelCase__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCamelCase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise 
importance normalization if not args.dont_normalize_importance_by_layer: A_ : Optional[int] = 2 A_ : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase__ , lowerCamelCase__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: A_ : int = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("""Attention entropies""" ) print_ad_tensor(lowerCamelCase__ ) if compute_importance: logger.info("""Head importance scores""" ) print_ad_tensor(lowerCamelCase__ ) logger.info("""Head ranked by importance scores""" ) A_ : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) A_ : Any = torch.arange( head_importance.numel() , device=args.device ) A_ : Optional[Any] = head_ranks.view_as(lowerCamelCase__ ) print_ad_tensor(lowerCamelCase__ ) return attn_entropy, head_importance, total_loss def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_, A_, A_ : Dict = compute_heads_importance(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ ) A_ : Dict = 1 / loss # instead of downsteam score use the LM loss logger.info("""Pruning: original score: %f, threshold: %f""" , lowerCamelCase__ , original_score * args.masking_threshold ) A_ : List[str] = torch.ones_like(lowerCamelCase__ ) A_ : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) A_ : List[str] = original_score while current_score >= original_score * args.masking_threshold: A_ : Optional[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads A_ : Optional[int] = float("""Inf""" ) A_ : Tuple = head_importance.view(-1 ).sort()[1] if len(lowerCamelCase__ ) <= num_to_mask: print("""BREAK BY num_to_mask""" ) break # mask heads A_ : Optional[Any] = current_heads_to_mask[:num_to_mask] logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) ) A_ : List[Any] = new_head_mask.view(-1 ) A_ : Optional[int] = 0.0 A_ : List[Any] = new_head_mask.view_as(lowerCamelCase__ ) A_ : List[str] = new_head_mask.clone().detach() print_ad_tensor(lowerCamelCase__ ) # Compute metric and head importance again A_, A_, A_ : int = compute_heads_importance( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ , head_mask=lowerCamelCase__ ) A_ : Any = 1 / loss logger.info( """Masking: current score: %f, remaining heads %d (%.1f percents)""" , lowerCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , ) logger.info("""Final head mask""" ) print_ad_tensor(lowerCamelCase__ ) np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() ) return head_mask def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : int = datetime.now() A_, A_, A_ : Optional[int] = compute_heads_importance( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ , compute_importance=lowerCamelCase__ , head_mask=lowerCamelCase__ ) A_ : Optional[Any] = 1 / loss A_ : Optional[Any] = datetime.now() - before_time A_ : List[Any] = sum(p.numel() for p in model.parameters() ) A_ : Union[str, Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase__ ) ) } for k, v in heads_to_prune.items(): if 
isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Optional[int] = [ v, ] assert sum(len(lowerCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCamelCase__ ) A_ : int = sum(p.numel() for p in model.parameters() ) A_ : List[Any] = datetime.now() A_, A_, A_ : Tuple = compute_heads_importance( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ , compute_importance=lowerCamelCase__ , head_mask=lowerCamelCase__ , actually_pruned=lowerCamelCase__ , ) A_ : Tuple = 1 / loss A_ : Dict = datetime.now() - before_time logger.info( """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , lowerCamelCase__ , lowerCamelCase__ , pruned_num_params / original_num_params * 1_00 , ) logger.info("""Pruning: score with masking: %f score with pruning: %f""" , lowerCamelCase__ , lowerCamelCase__ ) logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 1_00 ) save_model(lowerCamelCase__ , args.output_dir ) def a ( ): '''simple docstring''' A_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--data_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , ) parser.add_argument( """--model_name_or_path""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--output_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , ) # Other parameters parser.add_argument( """--config_name""" , default="""""" , type=lowerCamelCase__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--tokenizer_name""" , default="""""" , type=lowerCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--cache_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , ) parser.add_argument( """--data_subset""" , type=lowerCamelCase__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" ) parser.add_argument( """--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) parser.add_argument( """--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" ) parser.add_argument( """--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , ) parser.add_argument( """--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" ) parser.add_argument( """--masking_threshold""" , default=0.9 , type=lowerCamelCase__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , ) parser.add_argument( """--masking_amount""" , default=0.1 , type=lowerCamelCase__ , help="""Amount to heads to 
masking at each masking step.""" ) parser.add_argument("""--metric_name""" , default="""acc""" , type=lowerCamelCase__ , help="""Metric to use for head masking.""" ) parser.add_argument( """--max_seq_length""" , default=1_28 , type=lowerCamelCase__ , help=( """The maximum total input sequence length after WordPiece tokenization. \n""" """Sequences longer than this will be truncated, sequences shorter padded.""" ) , ) parser.add_argument("""--batch_size""" , default=1 , type=lowerCamelCase__ , help="""Batch size.""" ) parser.add_argument("""--seed""" , type=lowerCamelCase__ , default=42 ) parser.add_argument("""--local_rank""" , type=lowerCamelCase__ , default=-1 , help="""local_rank for distributed training on gpus""" ) parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" ) parser.add_argument("""--server_ip""" , type=lowerCamelCase__ , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=lowerCamelCase__ , default="""""" , help="""Can be used for distant debugging.""" ) A_ : int = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" ) A_ : int = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) A_ : Union[str, Any] = torch.device("""cuda""" , args.local_rank ) A_ : Dict = 1 torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) A_ : str = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: A_ : Any = nn.parallel.DistributedDataParallel( lowerCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase__ ) elif args.n_gpu > 1: A_ : List[str] = nn.DataParallel(lowerCamelCase__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCamelCase__ ) torch.save(lowerCamelCase__ , os.path.join(args.output_dir , """run_args.bin""" ) ) logger.info("""Training/evaluation parameters %s""" , lowerCamelCase__ ) # Prepare dataset A_ : Optional[Any] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) A_ : Optional[Any] = (torch.from_numpy(lowerCamelCase__ ),) A_ : List[Any] = TensorDataset(*lowerCamelCase__ ) A_ : Optional[int] = RandomSampler(lowerCamelCase__ ) A_ : Optional[Any] = DataLoader(lowerCamelCase__ , sampler=lowerCamelCase__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: A_ 
: List[Any] = mask_heads(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) prune_heads(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
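# Worked check of the entropy computation the script above performs on each
# attention map (a self-contained sketch; `attention_entropy` restates the
# script's entropy() with the 0*log(0) := 0 convention). A uniform
# distribution over n keys has entropy ln(n); a one-hot distribution has 0.
import math

import torch


def attention_entropy(p: torch.Tensor) -> torch.Tensor:
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # mask the nan produced by 0 * log(0)
    return -plogp.sum(dim=-1)


uniform = torch.full((1, 4), 0.25)
assert torch.isclose(attention_entropy(uniform), torch.tensor([math.log(4.0)])).all()

one_hot = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
assert torch.isclose(attention_entropy(one_hot), torch.tensor([0.0])).all()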
206
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
289
0
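The recall sample above is a thin wrapper around `sklearn.metrics.recall_score`. As a sanity check on its docstring examples, the following minimal sketch calls sklearn directly and reproduces the binary, `pos_label=0`, and macro-averaged values; the variable names are illustrative, not taken from the sample.

# Minimal sketch: reproduce the recall docstring examples with sklearn directly.
from sklearn.metrics import recall_score

references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]

# Binary recall for the positive class (pos_label=1): TP=2, FN=1 -> 2/3.
print(recall_score(references, predictions))  # 0.6666...

# Treat class 0 as the positive class instead: TP=1, FN=1 -> 0.5.
print(recall_score(references, predictions, pos_label=0))  # 0.5

# Multiclass macro average: unweighted mean of per-class recalls (1.0, 0.0, 0.0).
print(recall_score([0, 1, 2, 0, 1, 2], [0, 2, 1, 0, 0, 1], average="macro"))  # 0.3333...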
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __a = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582" } def A_ ( _lowercase = "dhaka", _lowercase = 5 ): '''simple docstring''' snake_case_ :Dict = min(_lowercase, 50 ) # Prevent abuse! snake_case_ :Optional[int] = { """q""": query, """tbm""": """isch""", """hl""": """en""", """ijn""": """0""", } snake_case_ :List[Any] = requests.get("""https://www.google.com/search""", params=_lowercase, headers=_lowercase ) snake_case_ :str = BeautifulSoup(html.text, """html.parser""" ) snake_case_ :Optional[Any] = """""".join( re.findall(r"""AF_initDataCallback\(([^<]+)\);""", str(soup.select("""script""" ) ) ) ) snake_case_ :Dict = json.dumps(_lowercase ) snake_case_ :str = json.loads(_lowercase ) snake_case_ :Union[str, Any] = re.findall( r"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""", _lowercase, ) if not matched_google_image_data: return 0 snake_case_ :Tuple = re.sub( r"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""", """""", str(_lowercase ), ) snake_case_ :str = re.findall( r"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""", _lowercase, ) for index, fixed_full_res_image in enumerate(_lowercase ): if index >= max_images: return index snake_case_ :Optional[Any] = bytes(_lowercase, """ascii""" ).decode( """unicode-escape""" ) snake_case_ :Dict = bytes(_lowercase, """ascii""" ).decode( """unicode-escape""" ) snake_case_ :Any = urllib.request.build_opener() snake_case_ :str = [ ( """User-Agent""", """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36""" """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""", ) ] urllib.request.install_opener(_lowercase ) snake_case_ :Tuple = f"""query_{query.replace(' ', '_' )}""" if not os.path.exists(_lowercase ): os.makedirs(_lowercase ) urllib.request.urlretrieve( # noqa: S310 _lowercase, f"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: __a = download_images_from_google_query(sys.argv[1]) print(F"""{image_count} images were downloaded to disk.""") except IndexError: print("Please provide a search term.") raise
66
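The script above scrapes Google Images and fetches each hit with a browser-like User-Agent. Its download step, shown in isolation below as a sketch, installs a global opener before calling `urlretrieve`; the URL and output file name are placeholders, not values from the script.

# Sketch of the download step: spoof a User-Agent, then stream the bytes to disk.
import urllib.request

url = "https://example.com/sample.jpg"  # placeholder URL
opener = urllib.request.build_opener()
opener.addheaders = [("User-Agent", "Mozilla/5.0")]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(url, "sample.jpg")  # noqa: S310

Without the User-Agent header, Google and many CDNs serve a stripped page or refuse the request entirely, which is why the script installs the opener up front.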
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
289
0
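The two cache-checking helpers in the test above validate Flax's key/value cache by comparing a cached, token-by-token decode against one full decode. A hedged sketch of the same `encode`/`init_cache`/`decode` loop used for greedy generation follows; the prompt text and `max_length` are arbitrary assumptions, and in practice `model.generate` performs all of this internally.

# Greedy decoding with the Flax KV cache, mirroring the calls in the tests above.
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

inputs = tokenizer(["An example document."], return_tensors="np")
encoder_outputs = model.encode(inputs["input_ids"], attention_mask=inputs["attention_mask"])

batch_size, max_length = inputs["input_ids"].shape[0], 20
past = model.init_cache(batch_size, max_length, encoder_outputs)
# As in the test, the decoder attention mask must already cover max_length when caching.
decoder_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")

token = jnp.full((batch_size, 1), model.config.decoder_start_token_id, dtype="i4")
tokens = [token]
for step in range(max_length - 1):
    out = model.decode(
        token,
        encoder_outputs,
        decoder_attention_mask=decoder_attention_mask,
        past_key_values=past,
        decoder_position_ids=jnp.full((batch_size, 1), step, dtype="i4"),
    )
    past = out.past_key_values  # carry the cache forward one step
    token = jnp.argmax(out.logits[:, -1, :], axis=-1)[:, None].astype("i4")
    tokens.append(token)

print(tokenizer.batch_decode(np.asarray(jnp.concatenate(tokens, axis=1)), skip_special_tokens=True))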
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __snake_case = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') __snake_case = F"""https://www.google.com/search?q={query}&num=100""" __snake_case = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: __snake_case = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: __snake_case = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
97
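When Google serves its lightweight result page (the `kCrYT` fallback branch above), anchor hrefs are redirect links rather than direct targets, so the script recovers the real URL with `parse_qs`. A tiny sketch of that idea; the href below is made up, and the query parameter name (`q` here, `url` in the script) depends on which page variant Google returns.

# Extracting the real target from a Google redirect-style href.
from urllib.parse import parse_qs, urlparse

href = "/url?q=https://docs.python.org/3/&sa=U&ved=abc"  # made-up example
target = parse_qs(urlparse(href).query)["q"][0]
print(target)  # https://docs.python.org/3/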
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
0
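The sample above is a segmented Sieve of Eratosthenes: it first sieves the primes up to sqrt(n), then marks their multiples inside windows of width sqrt(n), so only O(sqrt(n)) memory is live at any time. Because the renamed temporaries make it hard to follow, here is an equivalent readable sketch of the same algorithm; the names are mine, not from the sample.

# Segmented sieve: base sieve up to sqrt(n), then sweep fixed-size windows.
import math

def segmented_sieve(n: int) -> list[int]:
    limit = int(math.sqrt(n))  # window width and base-sieve bound
    base = [True] * (limit + 1)
    primes: list[int] = []
    for p in range(2, limit + 1):
        if base[p]:
            primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                base[multiple] = False
    result = primes[:]
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:
        window = [True] * (high - low + 1)
        for p in primes:
            start = (low // p) * p  # first multiple of p at or above low
            if start < low:
                start += p
            for multiple in range(start, high + 1, p):
                window[multiple - low] = False
        result.extend(i + low for i, alive in enumerate(window) if alive)
        low, high = high + 1, min(high + limit, n)
    return result

assert segmented_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]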
import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class A_ : def __init__( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int=2 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : str=1_6 , UpperCAmelCase : Dict=3 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : str=True , UpperCAmelCase : Tuple=3_2 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Dict=[0, 1, 2, 3] , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : int=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : List[str]=0.02 , UpperCAmelCase : str=3 , UpperCAmelCase : str=[1, 3_8_4, 2_4, 2_4] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=None , ) -> Tuple: __lowerCAmelCase: List[Any] = parent __lowerCAmelCase: int = batch_size __lowerCAmelCase: Union[str, Any] = image_size __lowerCAmelCase: Optional[Any] = patch_size __lowerCAmelCase: Any = num_channels __lowerCAmelCase: Optional[int] = is_training __lowerCAmelCase: Optional[int] = use_labels __lowerCAmelCase: Any = hidden_size __lowerCAmelCase: Optional[int] = num_hidden_layers __lowerCAmelCase: Tuple = backbone_out_indices __lowerCAmelCase: Union[str, Any] = num_attention_heads __lowerCAmelCase: List[Any] = intermediate_size __lowerCAmelCase: Any = hidden_act __lowerCAmelCase: int = hidden_dropout_prob __lowerCAmelCase: Dict = attention_probs_dropout_prob __lowerCAmelCase: List[Any] = initializer_range __lowerCAmelCase: Dict = num_labels __lowerCAmelCase: Optional[Any] = backbone_featmap_shape __lowerCAmelCase: Union[str, Any] = scope __lowerCAmelCase: Dict = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase: Any = (image_size // patch_size) ** 2 __lowerCAmelCase: Optional[int] = num_patches + 1 def UpperCAmelCase ( self : Any ) -> Optional[Any]: __lowerCAmelCase: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase: List[str] = None if self.use_labels: __lowerCAmelCase: int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowerCAmelCase: Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Optional[int] ) -> int: __lowerCAmelCase: Any = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [9_6, 1_9_2, 3_8_4, 7_6_8], 'num_groups': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ) -> Dict: __lowerCAmelCase: int = DPTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowerCAmelCase: List[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> Tuple: __lowerCAmelCase: int = self.num_labels __lowerCAmelCase: Union[str, Any] = DPTForDepthEstimation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowerCAmelCase: List[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] ) -> List[Any]: __lowerCAmelCase: int = self.num_labels __lowerCAmelCase: List[str] = DPTForSemanticSegmentation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowerCAmelCase: Optional[int] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCAmelCase ( self : Any ) -> Any: __lowerCAmelCase: int = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = config_and_inputs __lowerCAmelCase: Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowercase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _lowercase : Any = ( { 'depth-estimation': DPTForDepthEstimation, 'feature-extraction': DPTModel, 'image-segmentation': DPTForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase : Dict = False _lowercase : str = False _lowercase : int = False def UpperCAmelCase ( self : int ) -> List[str]: __lowerCAmelCase: str = DPTModelTester(self ) __lowerCAmelCase: int = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 ) def UpperCAmelCase ( self : List[str] ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def UpperCAmelCase ( self : str ) -> Optional[Any]: pass def UpperCAmelCase ( self : str ) -> List[str]: __lowerCAmelCase , __lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase: str = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase: Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def UpperCAmelCase ( self : str ) -> Union[str, Any]: __lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
__lowerCAmelCase: str = model_class(__lowerCAmelCase ) __lowerCAmelCase: int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase: Optional[int] = [*signature.parameters.keys()] __lowerCAmelCase: Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def UpperCAmelCase ( self : str ) -> str: __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__lowerCAmelCase ) def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: __lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase ) def UpperCAmelCase ( self : Dict ) -> List[str]: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __lowerCAmelCase , __lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: str = True if model_class in get_values(__lowerCAmelCase ): continue __lowerCAmelCase: Dict = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() __lowerCAmelCase: Optional[int] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) __lowerCAmelCase: List[Any] = model(**__lowerCAmelCase ).loss loss.backward() def UpperCAmelCase ( self : Any ) -> str: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __lowerCAmelCase , __lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: Dict = False __lowerCAmelCase: Optional[Any] = True if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing: continue __lowerCAmelCase: Tuple = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() __lowerCAmelCase: Optional[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) __lowerCAmelCase: List[Any] = model(**__lowerCAmelCase ).loss loss.backward() def UpperCAmelCase ( self : Any ) -> str: __lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: Dict = _config_zero_init(__lowerCAmelCase ) for model_class in self.all_model_classes: __lowerCAmelCase: Tuple = model_class(config=__lowerCAmelCase ) # Skip the check for the backbone __lowerCAmelCase: Dict = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __lowerCAmelCase: Optional[Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: pass @slow def UpperCAmelCase ( self : List[Any] ) -> List[Any]: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __lowerCAmelCase: Dict = DPTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type __lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: Any = 'add' with self.assertRaises(__lowerCAmelCase ): __lowerCAmelCase: List[Any] = DPTForDepthEstimation(__lowerCAmelCase ) def _a ( ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class A_ ( unittest.TestCase ): def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: __lowerCAmelCase: List[Any] = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) __lowerCAmelCase: Tuple = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(__lowerCAmelCase ) __lowerCAmelCase: Dict = prepare_img() __lowerCAmelCase: Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): __lowerCAmelCase: Optional[int] = model(**__lowerCAmelCase ) __lowerCAmelCase: List[str] = outputs.predicted_depth # verify the predicted depth __lowerCAmelCase: Optional[Any] = torch.Size((1, 3_8_4, 3_8_4) ) self.assertEqual(predicted_depth.shape , __lowerCAmelCase ) __lowerCAmelCase: Optional[int] = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __lowerCAmelCase , atol=1E-4 ) )
322
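The integration test above only checks raw `predicted_depth` logits. In practice the depth map is resized back to the input resolution and normalized for display; the sketch below shows that conventional post-processing, where the bicubic resize and max-normalization are common practice rather than something the test asserts.

# Turn DPT's predicted depth into a viewable grayscale image.
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    depth = model(**inputs).predicted_depth  # shape (1, 384, 384)

# Resize to the original (height, width) and scale to 0-255.
depth = torch.nn.functional.interpolate(
    depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
).squeeze()
gray = (depth / depth.max() * 255).cpu().numpy().astype("uint8")
Image.fromarray(gray).save("depth.png")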
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
0
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=16 , lowerCAmelCase__=36 , lowerCAmelCase__=6 , lowerCAmelCase__=6 , lowerCAmelCase__=6 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Tuple: '''simple docstring''' lowercase__: List[str] = parent lowercase__: List[Any] = batch_size lowercase__: List[str] = seq_length lowercase__: Any = is_training lowercase__: Dict = use_input_mask lowercase__: List[str] = use_token_type_ids lowercase__: str = use_labels lowercase__: int = vocab_size lowercase__: List[str] = embedding_size lowercase__: Dict = hidden_size lowercase__: Any = num_hidden_layers lowercase__: List[str] = num_hidden_groups lowercase__: int = num_attention_heads lowercase__: int = intermediate_size lowercase__: Optional[int] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: Optional[Any] = max_position_embeddings lowercase__: Optional[Any] = type_vocab_size lowercase__: List[Any] = type_sequence_label_size lowercase__: List[str] = initializer_range lowercase__: Any = num_labels lowercase__: Optional[Any] = num_choices lowercase__: str = scope def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Tuple = None if self.use_input_mask: lowercase__: Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[str] = None if self.use_token_type_ids: lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: int = None lowercase__: Union[str, Any] = None lowercase__: str = None if self.use_labels: lowercase__: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' lowercase__: Any = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowercase__: Any = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowercase__: List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' lowercase__: Dict = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: Optional[Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' lowercase__: Dict = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' lowercase__: Optional[int] = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: str = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' lowercase__: Optional[Any] = self.num_labels lowercase__: str = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 
self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' lowercase__: List[str] = self.num_labels lowercase__: Optional[int] = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' lowercase__: List[Any] = self.num_choices lowercase__: Dict = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__: Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: int = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ): Union[str, Any] = config_and_inputs lowercase__: str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): __lowercase : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) __lowercase : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) __lowercase : Dict = True def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int: '''simple docstring''' lowercase__: List[str] = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): lowercase__: Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) lowercase__: List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Dict = AlbertModelTester(self ) lowercase__: Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' 
self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__: List[str] = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: Tuple = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class __a ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: Tuple = AlbertModel.from_pretrained('albert-base-v2' ) lowercase__: Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) lowercase__: str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__: Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] lowercase__: List[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) lowercase__: Union[str, Any] = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) )
196
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
289
0
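The benchmark record above times callables with `timeit.repeat` and reports the minimum over repeats divided by the inner `number`, following the `timeit` documentation's advice that the minimum is more robust than the mean. A minimal standalone sketch of that pattern (the timed function is a stand-in, not part of the original code):

import timeit

def dummy_forward():
    # Stand-in for a model forward pass.
    return sum(i * i for i in range(10_000))

# timeit.repeat returns one total, in seconds, per repeat; each total covers
# `number` calls, so min(runs) / number is the best-case per-call latency.
runs = timeit.repeat(dummy_forward, repeat=3, number=10)
print(f"best per-call time: {min(runs) / 10.0:.6f} s")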
'''simple docstring''' import logging from transformers.configuration_utils import PretrainedConfig _SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__) class _snake_case ( lowerCAmelCase_ ): lowerCAmelCase_ : List[str] = 'masked_bert' def __init__( self , a__=30_522 , a__=768 , a__=12 , a__=12 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=0 , a__="topK" , a__="constant" , a__=0.0 , **a__ , ) -> Dict: '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = pruning_method snake_case_ = mask_init snake_case_ = mask_scale
85
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
0
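The second snippet in the record above counts, by backtracking, the ways to write a target as a sum of distinct natural-number powers, but its identifiers are mangled beyond reuse. A small self-contained sketch of the same idea (the function name and sample values are illustrative, not from the original file):

def count_power_sums(target: int, power: int, start: int = 1, total: int = 0) -> int:
    # Count representations of `target` as a sum of distinct `power`-th powers.
    if total == target:
        return 1
    term = start**power
    if total + term > target:
        # Terms only grow from here, so no completion is possible.
        return 0
    # Either include start**power in the sum, or skip to the next base.
    return count_power_sums(target, power, start + 1, total + term) + count_power_sums(
        target, power, start + 1, total
    )

print(count_power_sums(13, 2))   # 1, since 13 = 4 + 9
print(count_power_sums(100, 2))  # 3: 100, 36 + 64, and 1 + 9 + 16 + 25 + 49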
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker SCREAMING_SNAKE_CASE__ = "CompVis/stable-diffusion-v1-1" SCREAMING_SNAKE_CASE__ = "CompVis/stable-diffusion-v1-2" SCREAMING_SNAKE_CASE__ = "CompVis/stable-diffusion-v1-3" SCREAMING_SNAKE_CASE__ = "CompVis/stable-diffusion-v1-4" class lowercase ( lowerCAmelCase_ ): def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[str, Any]: super()._init_() lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase ) lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase ) lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase ) lowerCAmelCase = StableDiffusionPipeline( vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , requires_safety_checker=__lowerCAmelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _snake_case ( self ) -> Any: return {k: getattr(self , __lowerCAmelCase ) for k in self.config.keys() if not k.startswith("""_""" )} def _snake_case ( self , lowercase = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: self.enable_attention_slicing(__lowerCAmelCase ) @torch.no_grad() def _snake_case ( self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> List[str]: return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def _snake_case ( self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> List[str]: return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , 
return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def _snake_case ( self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> Union[str, Any]: return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def _snake_case ( self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> List[str]: return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def _snake_case ( self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> Any: lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(__lowerCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' 
) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
46
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
0
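The checkpoint-conversion record above permutes TensorFlow kernels into PyTorch layout before copying them; a minimal sketch of that axis bookkeeping with made-up shapes (requires numpy and torch):

import numpy as np
import torch

# TensorFlow stores conv kernels as (H, W, in_channels, out_channels);
# PyTorch expects (out_channels, in_channels, H, W), hence permute(3, 2, 0, 1).
tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)

# Depthwise kernels are (H, W, channels, multiplier) in TF and
# (channels * multiplier, 1, H, W) in PyTorch, hence permute(2, 3, 0, 1).
tf_dw = np.zeros((3, 3, 16, 1), dtype=np.float32)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
assert pt_dw.shape == (16, 1, 3, 3)

# Dense kernels are (in_features, out_features) in TF; PyTorch Linear
# weights are (out_features, in_features), hence a plain transpose.
tf_dense = np.zeros((1280, 1000), dtype=np.float32)
pt_dense = torch.from_numpy(np.transpose(tf_dense))
assert pt_dense.shape == (1000, 1280)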
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def _lowerCAmelCase ( ) -> Optional[int]: """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def _lowerCAmelCase ( ) -> Union[str, Any]: """simple docstring""" snake_case__ : Union[str, Any] = '''mock-s3-bucket''' snake_case__ : List[str] = f"""s3://{mock_bucket}""" snake_case__ : List[Any] = extract_path_from_uri(__lowerCAmelCase ) assert dataset_path.startswith('''s3://''' ) is False snake_case__ : List[Any] = '''./local/path''' snake_case__ : Dict = extract_path_from_uri(__lowerCAmelCase ) assert dataset_path == new_dataset_path def _lowerCAmelCase ( __lowerCAmelCase ) -> Optional[Any]: """simple docstring""" snake_case__ : Union[str, Any] = is_remote_filesystem(__lowerCAmelCase ) assert is_remote is True snake_case__ : List[str] = fsspec.filesystem('''file''' ) snake_case__ : Dict = is_remote_filesystem(__lowerCAmelCase ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , __lowerCAmelCase ) def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: """simple docstring""" snake_case__ : Optional[int] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} snake_case__ : Optional[int] = input_paths[compression_fs_class.protocol] if input_path is None: snake_case__ : str = f"""for \'{compression_fs_class.protocol}\' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__lowerCAmelCase ) snake_case__ : Tuple = fsspec.filesystem(compression_fs_class.protocol , fo=__lowerCAmelCase ) assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) snake_case__ : Any = os.path.basename(__lowerCAmelCase ) snake_case__ : List[str] = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f, open(__lowerCAmelCase , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: """simple docstring""" snake_case__ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} snake_case__ : Any = compressed_file_paths[protocol] snake_case__ : Tuple = '''dataset.jsonl''' snake_case__ : List[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}""" snake_case__ , *snake_case__ : Optional[int] = fsspec.get_fs_token_paths(__lowerCAmelCase ) assert fs.isfile(__lowerCAmelCase ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Union[str, Any] = 
hf_api.dataset_info(__lowerCAmelCase , token=__lowerCAmelCase ) snake_case__ : Dict = HfFileSystem(repo_info=__lowerCAmelCase , token=__lowerCAmelCase ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(__lowerCAmelCase ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def _lowerCAmelCase ( ) -> Any: """simple docstring""" snake_case__ : Optional[Any] = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__lowerCAmelCase , __lowerCAmelCase , clobber=__lowerCAmelCase ) with pytest.warns(__lowerCAmelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__lowerCAmelCase ) == 1 assert ( str(warning_info[0].message ) == f"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
230
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
0
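The Markov-chain snippet above samples the next node by accumulating transition probabilities until they exceed a uniform random draw. A standalone sketch of that weighted walk, with an illustrative three-node graph rather than the original's mangled class:

import random
from collections import Counter

# node -> {successor: transition probability}; each row should sum to 1.
transitions = {
    "a": {"a": 0.9, "b": 0.075, "c": 0.025},
    "b": {"a": 0.15, "b": 0.8, "c": 0.05},
    "c": {"a": 0.25, "b": 0.25, "c": 0.5},
}

def step(node: str) -> str:
    # Walk the cumulative distribution until it exceeds a uniform draw,
    # mirroring the transition() method in the snippet above.
    r = random.random()
    cumulative = 0.0
    for dest, p in transitions[node].items():
        cumulative += p
        if cumulative > r:
            return dest
    return node  # numerical fallback if probabilities sum to slightly under 1

visits = Counter()
node = "a"
for _ in range(10_000):
    node = step(node)
    visits[node] += 1
print(visits.most_common())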
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig lowerCamelCase__ : List[str] = logging.get_logger(__name__) lowerCamelCase__ : Union[str, Any] = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class _UpperCAmelCase ( lowerCAmelCase_): __a : List[str] = 'dpt' def __init__( self , _A=7_68 , _A=12 , _A=12 , _A=30_72 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1e-12 , _A=3_84 , _A=16 , _A=3 , _A=False , _A=True , _A=[2, 5, 8, 11] , _A="project" , _A=[4, 2, 1, 0.5] , _A=[96, 1_92, 3_84, 7_68] , _A=2_56 , _A=-1 , _A=False , _A=True , _A=0.4 , _A=2_55 , _A=0.1 , _A=[1, 10_24, 24, 24] , _A=[0, 1] , _A=None , **_A , ) -> List[str]: '''simple docstring''' super().__init__(**__lowerCAmelCase ) _UpperCAmelCase : str = hidden_size _UpperCAmelCase : Union[str, Any] = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""" ) _UpperCAmelCase : str = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, } _UpperCAmelCase : Dict = BitConfig(**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): logger.info("""Initializing the config with a `BiT` backbone.""" ) _UpperCAmelCase : Any = BitConfig(**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Tuple = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) _UpperCAmelCase : Tuple = backbone_featmap_shape _UpperCAmelCase : List[Any] = neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" ) else: _UpperCAmelCase : List[Any] = None _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Union[str, Any] = [] _UpperCAmelCase : int = num_hidden_layers _UpperCAmelCase : int = num_attention_heads _UpperCAmelCase : Any = intermediate_size _UpperCAmelCase : List[str] = hidden_act _UpperCAmelCase : Tuple = hidden_dropout_prob _UpperCAmelCase : Optional[int] = attention_probs_dropout_prob _UpperCAmelCase : int = initializer_range _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : List[Any] = image_size _UpperCAmelCase : Any = patch_size _UpperCAmelCase : Tuple = num_channels _UpperCAmelCase : Union[str, Any] = qkv_bias _UpperCAmelCase : Union[str, Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" ) _UpperCAmelCase : Any = readout_type _UpperCAmelCase : Dict = reassemble_factors _UpperCAmelCase : Dict = neck_hidden_sizes _UpperCAmelCase : str = fusion_hidden_size _UpperCAmelCase : Dict = head_in_index _UpperCAmelCase : Any = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _UpperCAmelCase : Optional[int] = use_auxiliary_head _UpperCAmelCase : Tuple = auxiliary_loss_weight _UpperCAmelCase : str = semantic_loss_ignore_index _UpperCAmelCase : Dict = semantic_classifier_dropout def __snake_case ( self ) -> int: '''simple docstring''' _UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _UpperCAmelCase : str = 
self.backbone_config.to_dict() _UpperCAmelCase : Dict = self.__class__.model_type return output
246
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
289
0
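The image-processor tests above fix `size={"shortest_edge": 20}` and an 18x18 center crop; assuming a transformers install with vision extras and torch, the processor can be exercised directly like this (the random input image is illustrative):

import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])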
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
164
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
289
0
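The EfficientNet config above stores per-variant `width_coefficient` and `depth_coefficient` values; those coefficients feed the compound-scaling arithmetic below, a sketch of the standard rounding rule from EfficientNet implementations rather than code from this record:

import math

def scale_width(channels: int, width_coef: float, divisor: int = 8) -> int:
    # Scale channel counts, then round to the nearest multiple of `divisor`,
    # never dropping below 90% of the scaled target.
    scaled = channels * width_coef
    new = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if new < 0.9 * scaled:
        new += divisor
    return new

def scale_depth(repeats: int, depth_coef: float) -> int:
    # Block repeat counts are scaled and rounded up.
    return int(math.ceil(depth_coef * repeats))

# b7-style coefficients from the config above: width 2.0, depth 3.1.
print(scale_width(32, 2.0))  # 64
print(scale_depth(3, 3.1))   # 10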
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class snake_case ( unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('foo.json',)] ) def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : str ): UpperCAmelCase__ = GenerationConfig( do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCAmelCase ,config_name=__lowerCAmelCase ) UpperCAmelCase__ = GenerationConfig.from_pretrained(__lowerCAmelCase ,config_name=__lowerCAmelCase ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample ,__lowerCAmelCase ) self.assertEqual(loaded_config.temperature ,0.7 ) self.assertEqual(loaded_config.length_penalty ,1.0 ) self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k ,50 ) self.assertEqual(loaded_config.max_length ,20 ) self.assertEqual(loaded_config.max_time ,__lowerCAmelCase ) def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = AutoConfig.from_pretrained('gpt2' ) UpperCAmelCase__ = GenerationConfig.from_model_config(__lowerCAmelCase ) UpperCAmelCase__ = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCAmelCase ,__lowerCAmelCase ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id ) def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = GenerationConfig() UpperCAmelCase__ = { 'max_new_tokens': 1_024, 'foo': 'bar', } UpperCAmelCase__ = copy.deepcopy(__lowerCAmelCase ) UpperCAmelCase__ = generation_config.update(**__lowerCAmelCase ) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens ,1_024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCAmelCase ,{'foo': 'bar'} ) def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = GenerationConfig() UpperCAmelCase__ = 'bar' with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir: generation_config.save_pretrained(__lowerCAmelCase ) UpperCAmelCase__ = GenerationConfig.from_pretrained(__lowerCAmelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo ,'bar' ) UpperCAmelCase__ = GenerationConfig.from_model_config(__lowerCAmelCase ) assert not hasattr(__lowerCAmelCase ,'foo' ) # no new kwargs should be initialized if from config def __lowerCAmelCase ( self : int ): UpperCAmelCase__ = GenerationConfig() self.assertEqual(default_config.temperature ,1.0 ) self.assertEqual(default_config.do_sample ,__lowerCAmelCase ) self.assertEqual(default_config.num_beams ,1 ) UpperCAmelCase__ = GenerationConfig( do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,) self.assertEqual(config.temperature ,0.7 ) 
self.assertEqual(config.do_sample ,__lowerCAmelCase ) self.assertEqual(config.num_beams ,1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCAmelCase ) UpperCAmelCase__ = GenerationConfig.from_pretrained(__lowerCAmelCase ,temperature=1.0 ) self.assertEqual(loaded_config.temperature ,1.0 ) self.assertEqual(loaded_config.do_sample ,__lowerCAmelCase ) self.assertEqual(loaded_config.num_beams ,1 ) # default value @is_staging_test class snake_case ( unittest.TestCase ): """simple docstring""" @classmethod def __lowerCAmelCase ( cls : List[str] ): UpperCAmelCase__ = TOKEN HfFolder.save_token(__lowerCAmelCase ) @classmethod def __lowerCAmelCase ( cls : int ): try: delete_repo(token=cls._token ,repo_id='test-generation-config' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='valid_org/test-generation-config-org' ) except HTTPError: pass def __lowerCAmelCase ( self : List[Any] ): UpperCAmelCase__ = GenerationConfig( do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,) config.push_to_hub('test-generation-config' ,use_auth_token=self._token ) UpperCAmelCase__ = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) # Reset repo delete_repo(token=self._token ,repo_id='test-generation-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCAmelCase ,repo_id='test-generation-config' ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token ) UpperCAmelCase__ = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) def __lowerCAmelCase ( self : Any ): UpperCAmelCase__ = GenerationConfig( do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,) config.push_to_hub('valid_org/test-generation-config-org' ,use_auth_token=self._token ) UpperCAmelCase__ = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) # Reset repo delete_repo(token=self._token ,repo_id='valid_org/test-generation-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCAmelCase ,repo_id='valid_org/test-generation-config-org' ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token ) UpperCAmelCase__ = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
98
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
289
0
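# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the Albert tester
# pattern boils down to "build a tiny config, run a forward pass, assert the
# output shapes". A minimal self-contained version of that check might look
# like this; the tiny hyper-parameters below are arbitrary assumptions.
# ---------------------------------------------------------------------------
import torch
from transformers import AlbertConfig, AlbertModel

def _albert_shape_smoke_test() -> None:
    config = AlbertConfig(
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,          # must be divisible by num_attention_heads
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
    )
    model = AlbertModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch, seq_len)
    with torch.no_grad():
        out = model(input_ids)
    assert out.last_hidden_state.shape == (2, 7, config.hidden_size)
    assert out.pooler_output.shape == (2, config.hidden_size)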
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCamelCase :Dict = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Tuple = PegasusConfig __SCREAMING_SNAKE_CASE : int = {} __SCREAMING_SNAKE_CASE : str = 'gelu' def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ): A_ : List[str] = parent A_ : Union[str, Any] = batch_size A_ : Optional[int] = seq_length A_ : Dict = is_training A_ : Dict = use_labels A_ : Union[str, Any] = vocab_size A_ : str = hidden_size A_ : List[Any] = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : List[Any] = intermediate_size A_ : List[str] = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : Dict = max_position_embeddings A_ : Dict = eos_token_id A_ : List[Any] = pad_token_id A_ : Dict = bos_token_id def _a (self ): A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) A_ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) A_ : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : int = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A_ : List[str] = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def _a (self , lowercase , lowercase , lowercase ): A_ : Dict = 20 A_ : List[str] = model_class_name(__lowerCAmelCase ) A_ : Dict = model.encode(inputs_dict["""input_ids"""] ) A_, A_ : Any = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) A_ : Dict = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) A_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) A_ : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A_ : Dict = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , 
decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) A_ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) A_ : Any = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) A_ : Any = model.decode(__lowerCAmelCase , __lowerCAmelCase ) A_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def _a (self , lowercase , lowercase , lowercase ): A_ : Optional[int] = 20 A_ : Union[str, Any] = model_class_name(__lowerCAmelCase ) A_ : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) A_, A_ : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) A_ : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) A_ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) A_ : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A_ : List[str] = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) A_ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) A_ : Tuple = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) A_ : Optional[int] = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) A_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if attention_mask is None: A_ : Any = np.not_equal(lowerCamelCase__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: A_ : List[str] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __SCREAMING_SNAKE_CASE : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : str = False def _a (self ): A_ : List[str] = FlaxPegasusModelTester(self ) A_ : Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_, A_ : Tuple = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def _a (self ): A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def _a (self ): A_, A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) A_ : List[Any] = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(lowercase , lowercase=None , **lowercase ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): A_ : Tuple = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A_ : Union[str, Any] = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _a (self ): A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ : int = model_class(__lowerCAmelCase ) A_ : Optional[int] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) A_ : Optional[int] = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowercase , lowercase , lowercase ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): A_ : Dict = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A_ : Union[str, Any] = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _a (self ): for model_class_name in self.all_model_classes: A_ : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) A_ : str = np.ones((1, 1) ) A_ : Optional[Any] = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def _a (self ): A_ : str = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) A_ : int = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) A_ : Optional[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. 
I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] A_ : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] A_ : str = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) A_ : int = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences A_ : List[Any] = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
206
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
0
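# ---------------------------------------------------------------------------
# Illustrative sketch: the min-cut routine above follows the Ford-Fulkerson /
# Edmonds-Karp pattern -- repeatedly BFS for an augmenting path, push the
# bottleneck flow along it, and finally report edges whose residual capacity
# dropped to zero. A compact, de-obfuscated max-flow core might look like
# this (the toy graph in the usage comment is mine, not the one above).
# ---------------------------------------------------------------------------
from collections import deque

def max_flow(capacity: list[list[int]], source: int, sink: int) -> int:
    n = len(capacity)
    graph = [row[:] for row in capacity]  # residual capacities
    total = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:  # BFS for an augmenting path
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return total
        # bottleneck along the path, then update residual capacities
        flow, v = float("inf"), sink
        while v != source:
            flow = min(flow, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= flow
            graph[v][u] += flow
            v = u
        total += flow

# e.g. max_flow([[0, 3, 2, 0], [0, 0, 5, 2], [0, 0, 0, 3], [0, 0, 0, 0]], 0, 3) == 5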
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def A_ ( ): '''simple docstring''' snake_case_ :Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--model_ckpt""", type=_lowercase, default="""microsoft/unixcoder-base-nine""" ) parser.add_argument("""--num_epochs""", type=_lowercase, default=5 ) parser.add_argument("""--batch_size""", type=_lowercase, default=6 ) parser.add_argument("""--gradient_accumulation_steps""", type=_lowercase, default=1 ) parser.add_argument("""--freeze""", type=_lowercase, default=_lowercase ) parser.add_argument("""--learning_rate""", type=_lowercase, default=5e-4 ) parser.add_argument("""--seed""", type=_lowercase, default=0 ) parser.add_argument("""--lr_scheduler_type""", type=_lowercase, default="""cosine""" ) parser.add_argument("""--num_warmup_steps""", type=_lowercase, default=10 ) parser.add_argument("""--weight_decay""", type=_lowercase, default=0.01 ) parser.add_argument("""--output_dir""", type=_lowercase, default="""./results""" ) return parser.parse_args() __a = load("accuracy") def A_ ( _lowercase ): '''simple docstring''' snake_case_, snake_case_ :Dict = eval_pred snake_case_ :Optional[int] = np.argmax(_lowercase, axis=1 ) return metric.compute(predictions=_lowercase, references=_lowercase ) class lowerCamelCase ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self: List[str] , snake_case: Tuple ) -> Tuple: super().__init__() snake_case_ :int = trainer def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Any , snake_case: List[Any] , snake_case: Tuple , **snake_case: Dict ) -> Optional[Any]: if control.should_evaluate: snake_case_ :Tuple = deepcopy(__lowerCAmelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" ) return control_copy def A_ ( ): '''simple docstring''' snake_case_ :Optional[Any] = get_args() set_seed(args.seed ) snake_case_ :Union[str, Any] = load_dataset("""codeparrot/codecomplex""", split="""train""" ) snake_case_ :Any = dataset.train_test_split(test_size=0.2 ) snake_case_ :str = train_test["""test"""].train_test_split(test_size=0.5 ) snake_case_ :str = DatasetDict( { """train""": train_test["""train"""], """test""": test_validation["""train"""], """valid""": test_validation["""test"""], } ) print("""Loading tokenizer and model""" ) snake_case_ :List[str] = AutoTokenizer.from_pretrained(args.model_ckpt ) snake_case_ :Tuple = tokenizer.eos_token snake_case_ :str = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 ) snake_case_ :int = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): snake_case_ :Tuple = False snake_case_ :Tuple = ClassLabel(num_classes=7, names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) ) def tokenize(_lowercase ): snake_case_ :Dict = tokenizer(example["""src"""], truncation=_lowercase, max_length=1024 ) snake_case_ :str = labels.straint(example["""complexity"""] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } snake_case_ :Dict = train_test_validation.map( _lowercase, batched=_lowercase, remove_columns=train_test_validation["""train"""].column_names, ) snake_case_ :Union[str, Any] = 
DataCollatorWithPadding(tokenizer=_lowercase ) snake_case_ :List[Any] = TrainingArguments( output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="""epoch""", save_strategy="""epoch""", logging_strategy="""epoch""", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="""accuracy""", run_name="""complexity-java""", report_to="""wandb""", ) snake_case_ :List[Any] = Trainer( model=_lowercase, args=_lowercase, train_dataset=tokenized_datasets["""train"""], eval_dataset=tokenized_datasets["""valid"""], tokenizer=_lowercase, data_collator=_lowercase, compute_metrics=_lowercase, ) print("""Training...""" ) trainer.add_callback(CustomCallback(_lowercase ) ) trainer.train() if __name__ == "__main__": main()
66
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
0
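# ---------------------------------------------------------------------------
# Illustrative sketch: the training script above maps the string `complexity`
# column to integer ids with a `datasets.ClassLabel`. The call that appears
# mangled above is `ClassLabel.str2int`; a minimal self-contained use looks
# like this (the label names below are an arbitrary assumption).
# ---------------------------------------------------------------------------
from datasets import ClassLabel

complexity_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert complexity_labels.str2int("linear") == 1
assert complexity_labels.int2str(2) == "quadratic"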
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def a ( __a , __a , __a , __a ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ :str = s.rsplit(__a , __a ) return new.join(__a ) def a ( __a ) -> List[str]: '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def a ( __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :int = {} UpperCamelCase__ :Tuple = ['''group_1''', '''group_2''', '''group_3''', '''group_4'''] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: UpperCamelCase__ :int = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' ) if "res_path" in key: UpperCamelCase__ :List[str] = key.replace('''res_path.''' , '''res_path.path.''' ) if key.endswith('''.w''' ): UpperCamelCase__ :Optional[Any] = rreplace(__a , '''.w''' , '''.weight''' , 1 ) if key.endswith('''.b''' ): UpperCamelCase__ :List[Any] = rreplace(__a , '''.b''' , '''.bias''' , 1 ) UpperCamelCase__ :Optional[int] = value.float() return upgrade @torch.no_grad() def a ( __a , __a , __a=None , __a=True ) -> int: '''simple docstring''' from dall_e import Encoder UpperCamelCase__ :Dict = Encoder() if os.path.exists(__a ): UpperCamelCase__ :Dict = torch.load(__a ) else: UpperCamelCase__ :Tuple = torch.hub.load_state_dict_from_url(__a ) if isinstance(__a , __a ): UpperCamelCase__ :Optional[int] = ckpt.state_dict() encoder.load_state_dict(__a ) if config_path is not None: UpperCamelCase__ :int = FlavaImageCodebookConfig.from_pretrained(__a ) else: UpperCamelCase__ :int = FlavaImageCodebookConfig() UpperCamelCase__ :str = FlavaImageCodebook(__a ).eval() UpperCamelCase__ :Tuple = encoder.state_dict() UpperCamelCase__ :str = upgrade_state_dict(__a ) hf_model.load_state_dict(__a ) UpperCamelCase__ :Union[str, Any] = hf_model.state_dict() UpperCamelCase__ :List[Any] = count_parameters(__a ) UpperCamelCase__ :Any = count_parameters(__a ) assert torch.allclose(__a , __a , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(__a ) else: return hf_state_dict if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') __snake_case = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
97
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
0
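# ---------------------------------------------------------------------------
# Illustrative sketch: the checkpoint-conversion script above renames state
# dict keys by replacing only the *last* occurrence of a suffix (".w" ->
# ".weight", ".b" -> ".bias") via rsplit. A standalone version of that helper:
# ---------------------------------------------------------------------------
def rreplace(s: str, old: str, new: str, count: int) -> str:
    """Replace the last `count` occurrences of `old` in `s`."""
    return new.join(s.rsplit(old, count))

assert rreplace("blocks.0.attn.w", ".w", ".weight", 1) == "blocks.0.attn.weight"
assert rreplace("blocks.0.mlp.b", ".b", ".bias", 1) == "blocks.0.mlp.bias"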
from timeit import timeit def _a ( SCREAMING_SNAKE_CASE : Any ) -> List[str]: """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) __lowerCAmelCase: Optional[Any] = 0 while number: number &= number - 1 result += 1 return result def _a ( SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) __lowerCAmelCase: Union[str, Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def _a ( ) -> List[str]: """simple docstring""" def do_benchmark(SCREAMING_SNAKE_CASE : Optional[Any] ) -> None: __lowerCAmelCase: List[Any] = 'import __main__ as z' print(f'''Benchmark when {number = }:''' ) print(f'''{get_set_bits_count_using_modulo_operator(SCREAMING_SNAKE_CASE ) = }''' ) __lowerCAmelCase: List[str] = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=SCREAMING_SNAKE_CASE ) print(f'''timeit() runs in {timing} seconds''' ) print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(SCREAMING_SNAKE_CASE ) = }''' ) __lowerCAmelCase: Tuple = timeit( 'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=SCREAMING_SNAKE_CASE , ) print(f'''timeit() runs in {timing} seconds''' ) for number in (25, 37, 58, 0): do_benchmark(SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
322
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
0
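# ---------------------------------------------------------------------------
# Illustrative sketches for the two files above (pure Python, but the names
# are mine, not the originals'):
#  * Brian Kernighan's trick: n &= n - 1 clears the lowest set bit, so the
#    loop body runs exactly once per set bit;
#  * the metric length converter reduces to exponent arithmetic, since every
#    unit is 10**k metres.
# ---------------------------------------------------------------------------
def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count

_METRE_EXPONENT = {"m": 0, "km": 3, "Mm": 6, "Gm": 9}

def convert_length(value: float, from_unit: str, to_unit: str) -> float:
    return value * 10 ** (_METRE_EXPONENT[from_unit] - _METRE_EXPONENT[to_unit])

assert popcount_kernighan(0b101101) == 4
assert convert_length(4000.0, "m", "km") == 4.0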
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __a ( lowerCAmelCase_ ): __lowercase : List[str] = 'roberta' def __init__( self , lowerCAmelCase__=50_265 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3_072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) lowercase__: Any = vocab_size lowercase__: Optional[Any] = hidden_size lowercase__: List[Any] = num_hidden_layers lowercase__: int = num_attention_heads lowercase__: Tuple = hidden_act lowercase__: Union[str, Any] = intermediate_size lowercase__: Any = hidden_dropout_prob lowercase__: str = attention_probs_dropout_prob lowercase__: int = max_position_embeddings lowercase__: Optional[int] = type_vocab_size lowercase__: Optional[int] = initializer_range lowercase__: int = layer_norm_eps lowercase__: List[Any] = position_embedding_type lowercase__: Optional[Any] = use_cache lowercase__: str = classifier_dropout class __a ( lowerCAmelCase_ ): @property def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.task == "multiple-choice": lowercase__: List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowercase__: Dict = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
196
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
289
0
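# ---------------------------------------------------------------------------
# Illustrative sketch: the key trick in the Accelerate script above is
# `find_executable_batch_size` -- decorate an inner training function whose
# first argument is `batch_size`, call it with no arguments, and Accelerate
# retries with a halved batch size whenever it hits an out-of-memory error.
# The body below is a stand-in, not the script's actual loop.
# ---------------------------------------------------------------------------
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size: int) -> None:
    # build dataloaders/model for `batch_size` and run the loop here;
    # a CUDA OOM raised inside makes the decorator halve batch_size and retry
    print(f"training with batch_size={batch_size}")

# train()  # invoked with no arguments; the decorator supplies batch_size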
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=lowerCAmelCase_ ) class _snake_case ( lowerCAmelCase_ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization lowerCAmelCase_ : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} ) lowerCAmelCase_ : ClassVar[Features] = Features({"text": Value("string" )} ) lowerCAmelCase_ : ClassVar[Features] = Features({"summary": Value("string" )} ) lowerCAmelCase_ : str = "text" lowerCAmelCase_ : str = "summary" @property def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' return {self.text_column: "text", self.summary_column: "summary"}
85
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
0
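# ---------------------------------------------------------------------------
# Illustrative sketch: the frozen dataclass above matches `datasets`' own
# `Summarization` task template (present in the datasets versions this code
# targets; the tasks module was later deprecated). `column_mapping` simply
# declares how a dataset's columns map onto the canonical ("text", "summary")
# schema; the column names below are arbitrary assumptions.
# ---------------------------------------------------------------------------
from datasets.tasks import Summarization

template = Summarization(text_column="document", summary_column="highlights")
assert template.column_mapping == {"document": "text", "highlights": "summary"}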
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) enable_full_determinism() class lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _SCREAMING_SNAKE_CASE = UNetaDModel _SCREAMING_SNAKE_CASE = 'sample' @property def _snake_case ( self ) -> List[str]: lowerCAmelCase = 4 lowerCAmelCase = 3 lowerCAmelCase = (32, 32) lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def _snake_case ( self ) -> Tuple: return (3, 32, 32) @property def _snake_case ( self ) -> Tuple: return (3, 32, 32) def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } lowerCAmelCase = self.dummy_input return init_dict, inputs_dict class lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _SCREAMING_SNAKE_CASE = UNetaDModel _SCREAMING_SNAKE_CASE = 'sample' @property def _snake_case ( self ) -> Optional[int]: lowerCAmelCase = 4 lowerCAmelCase = 4 lowerCAmelCase = (32, 32) lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def _snake_case ( self ) -> int: return (4, 32, 32) @property def _snake_case ( self ) -> Optional[int]: return (4, 32, 32) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> Dict: lowerCAmelCase , lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) lowerCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) lowerCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def _snake_case ( self ) -> List[Any]: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` lowerCAmelCase , lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , 
output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() lowerCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) lowerCAmelCase = noise.to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) lowerCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() lowerCAmelCase , lowerCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() lowerCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def _snake_case ( self ) -> int: lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) lowerCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) lowerCAmelCase = noise.to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _SCREAMING_SNAKE_CASE = UNetaDModel _SCREAMING_SNAKE_CASE = 'sample' @property def _snake_case ( self , lowercase=(32, 32) ) -> str: lowerCAmelCase = 4 lowerCAmelCase = 3 lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def _snake_case ( self ) -> Tuple: return (3, 32, 32) @property def _snake_case ( self ) -> List[str]: return (3, 32, 32) def _snake_case ( self ) -> Tuple: lowerCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } lowerCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase , lowerCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) lowerCAmelCase = self.dummy_input lowerCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) lowerCAmelCase = noise lowerCAmelCase = model(**__lowerCAmelCase ) 
assert image is not None, "Make sure output is not None" @slow def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) lowerCAmelCase = 4 lowerCAmelCase = 3 lowerCAmelCase = (256, 256) lowerCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample lowerCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def _snake_case ( self ) -> List[Any]: lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) lowerCAmelCase = 4 lowerCAmelCase = 3 lowerCAmelCase = (32, 32) lowerCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) lowerCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample lowerCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def _snake_case ( self ) -> Union[str, Any]: # not required for this model pass
46
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
289
0
from __future__ import annotations

A__ = 10


def _lowerCAmelCase ( __lowerCAmelCase ) -> List[Any]:
    """simple docstring"""
    snake_case__ : Union[str, Any] = 1
    snake_case__ : int = max(__lowerCAmelCase )
    while placement <= max_digit:
        # declare and initialize empty buckets
        snake_case__ : Tuple = [[] for _ in range(__lowerCAmelCase )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            snake_case__ : Optional[Any] = int((i / placement) % RADIX )
            buckets[tmp].append(__lowerCAmelCase )
        # put each buckets' contents into list_of_ints
        snake_case__ : int = 0
        for b in range(__lowerCAmelCase ):
            for i in buckets[b]:
                snake_case__ : List[str] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
230
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
289
0
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase): @require_torch def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = pipeline( task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" ) _UpperCAmelCase : Dict = load_dataset("""ashraq/esc50""" ) _UpperCAmelCase : Dict = dataset["""train"""]["""audio"""][-1]["""array"""] _UpperCAmelCase : Tuple = audio_classifier(__lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , ) @unittest.skip("""No models are available in TF""" ) def __snake_case ( self ) -> List[Any]: '''simple docstring''' pass @slow @require_torch def __snake_case ( self ) -> Dict: '''simple docstring''' _UpperCAmelCase : List[str] = pipeline( task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , ) # This is an audio of a dog _UpperCAmelCase : Any = load_dataset("""ashraq/esc50""" ) _UpperCAmelCase : Any = dataset["""train"""]["""audio"""][-1]["""array"""] _UpperCAmelCase : Optional[int] = audio_classifier(__lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {"""score""": 0.999, """label""": """Sound of a dog"""}, {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""}, ] , ) _UpperCAmelCase : int = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {"""score""": 0.999, """label""": """Sound of a dog"""}, {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""}, ], ] * 5 , ) _UpperCAmelCase : int = audio_classifier( [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {"""score""": 0.999, """label""": """Sound of a dog"""}, {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""}, ], ] * 5 , ) @unittest.skip("""No models are available in TF""" ) def __snake_case ( self ) -> List[Any]: '''simple docstring''' pass
246
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
0
'''simple docstring'''

import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class A ( lowerCAmelCase_ , unittest.TestCase ):
    lowerCamelCase : Union[str, Any] = LayoutLMTokenizer
    lowerCamelCase : List[str] = LayoutLMTokenizerFast
    lowerCamelCase : int = True
    lowerCamelCase : Tuple = True

    def A__ ( self ) -> int:
        '''simple docstring'''
        super().setUp()
        lowercase__ = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def A__ ( self , **lowerCamelCase__ ) -> Dict:
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def A__ ( self , lowerCamelCase__ ) -> Dict:
        '''simple docstring'''
        lowercase__ = """UNwant\u00E9d,running"""
        lowercase__ = """unwanted, running"""
        return input_text, output_text

    def A__ ( self ) -> str:
        '''simple docstring'''
        lowercase__ = self.tokenizer_class(self.vocab_file )
        lowercase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(__lowerCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [7, 4, 5, 10, 8, 9] )

    def A__ ( self ) -> Tuple:
        '''simple docstring'''
        pass
164
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
0
"""simple docstring""" from __future__ import annotations from random import choice def a_ ( lowerCamelCase ): return choice(lowerCamelCase ) def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = random_pivot(lowerCamelCase ) # partition based on pivot # linear time UpperCAmelCase__ = [e for e in lst if e < pivot] UpperCAmelCase__ = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(lowerCamelCase ) == k - 1: return pivot # pivot is in elements bigger than k elif len(lowerCamelCase ) < k - 1: return kth_number(lowerCamelCase , k - len(lowerCamelCase ) - 1 ) # pivot is in elements smaller than k else: return kth_number(lowerCamelCase , lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
98
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
0
'''simple docstring'''

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase :Optional[int] = logging.get_logger(__name__)

lowerCamelCase :Optional[int] = {
    '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class _lowerCAmelCase ( lowerCAmelCase_ ):
    __SCREAMING_SNAKE_CASE : Optional[int] = 'wav2vec2'

    def __init__(self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" ,
                 lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 ,
                 lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" ,
                 lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) ,
                 lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=128 , lowercase=16 , lowercase=False ,
                 lowercase=True , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 ,
                 lowercase=0 , lowercase=320 , lowercase=2 , lowercase=0.1 , lowercase=100 , lowercase=256 ,
                 lowercase=256 , lowercase=0.1 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 ,
                 lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) ,
                 lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 ,
                 lowercase=2 , lowercase=3 , lowercase=None , lowercase=None , **lowercase , ):
        super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
        A_ : int = hidden_size
        A_ : Union[str, Any] = feat_extract_norm
        A_ : Optional[Any] = feat_extract_activation
        A_ : int = list(__lowerCAmelCase )
        A_ : Union[str, Any] = list(__lowerCAmelCase )
        A_ : Optional[int] = list(__lowerCAmelCase )
        A_ : Any = conv_bias
        A_ : str = num_conv_pos_embeddings
        A_ : Dict = num_conv_pos_embedding_groups
        A_ : Dict = len(self.conv_dim )
        A_ : Any = num_hidden_layers
        A_ : Union[str, Any] = intermediate_size
        A_ : Optional[Any] = hidden_act
        A_ : Any = num_attention_heads
        A_ : int = hidden_dropout
        A_ : Union[str, Any] = attention_dropout
        A_ : Any = activation_dropout
        A_ : List[str] = feat_proj_dropout
        A_ : Dict = final_dropout
        A_ : Optional[Any] = layerdrop
        A_ : int = layer_norm_eps
        A_ : Tuple = initializer_range
        A_ : Tuple = vocab_size
        A_ : Tuple = do_stable_layer_norm
        A_ : List[Any] = use_weighted_layer_sum

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        A_ : Dict = apply_spec_augment
        A_ : List[Any] = mask_time_prob
        A_ : Optional[int] = mask_time_length
        A_ : int = mask_time_min_masks
        A_ : Union[str, Any] = mask_feature_prob
        A_ : Dict = mask_feature_length
        A_ : int = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        A_ : List[Any] = num_codevectors_per_group
        A_ : List[Any] = num_codevector_groups
        A_ : Optional[Any] = contrastive_logits_temperature
        A_ : Tuple = feat_quantizer_dropout
        A_ : Union[str, Any] = num_negatives
        A_ : Optional[Any] = codevector_dim
        A_ : Optional[Any] = proj_codevector_dim
        A_ : int = diversity_loss_weight

        # ctc loss
        A_ : str = ctc_loss_reduction
        A_ : int = ctc_zero_infinity

        # adapter
        A_ : Optional[Any] = add_adapter
        A_ : int = adapter_kernel_size
        A_ : List[Any] = adapter_stride
        A_ : Union[str, Any] = num_adapter_layers
        A_ : Optional[int] = output_hidden_size or hidden_size
        A_ : int = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        A_ : List[str] = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        A_ : Optional[Any] = list(__lowerCAmelCase )
        A_ : str = list(__lowerCAmelCase )
        A_ : Tuple = list(__lowerCAmelCase )
        A_ : Tuple = xvector_output_dim

    @property
    def _a (self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
206
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
289
0
"""simple docstring""" def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = [1] for i in range(2, _lowercase ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" snake_case_ :List[Any] = [] snake_case_ :int = list(range(_lowercase ) ) # Find permutation while factorials: snake_case_ :Any = factorials.pop() snake_case_, snake_case_ :Union[str, Any] = divmod(_lowercase, _lowercase ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
289
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=18 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , ): '''simple docstring''' UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 20} UpperCamelCase__ :Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCamelCase__ :Optional[Any] = parent UpperCamelCase__ :str = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :Optional[Any] = image_size UpperCamelCase__ :Optional[int] = min_resolution UpperCamelCase__ :Union[str, Any] = max_resolution UpperCamelCase__ :Optional[int] = do_resize UpperCamelCase__ :str = size UpperCamelCase__ :Union[str, Any] = do_center_crop UpperCamelCase__ :str = crop_size UpperCamelCase__ :Tuple = do_flip_channel_order def lowerCAmelCase__ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class lowercase ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _a = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_flip_channel_order''' ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) UpperCamelCase__ :str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def lowerCAmelCase__ ( self ): '''simple docstring''' pass def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test 
not batched input UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCamelCase__ :Tuple = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCamelCase__ :str = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCamelCase__ :Tuple = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
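A minimal standalone sketch of the preprocessing path these tests exercise, assuming a transformers build that ships MobileViTImageProcessor; the dummy array and the printed shape are illustrative, not taken from the test suite.

import numpy as np
from transformers import MobileViTImageProcessor

# Same size/crop settings as the tester defaults above.
processor = MobileViTImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
dummy_image = np.random.randint(0, 256, (30, 30, 3), dtype=np.uint8)  # one HWC uint8 image (made up)
encoding = processor(images=dummy_image, return_tensors="pt")
print(encoding.pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])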
97
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
0
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _a = '''src/diffusers''' _a = '''.''' # This is to make sure the diffusers module imported is the one in the repo. _a = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) _a = spec.loader.load_module() def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" return line.startswith(SCREAMING_SNAKE_CASE ) or len(SCREAMING_SNAKE_CASE ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , SCREAMING_SNAKE_CASE ) is not None def _a ( SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase: Tuple = object_name.split('.' ) __lowerCAmelCase: Tuple = 0 # First let's find the module where our object lives. __lowerCAmelCase: int = parts[i] while i < len(SCREAMING_SNAKE_CASE ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , f'''{module}.py''' ) ): i += 1 if i < len(SCREAMING_SNAKE_CASE ): __lowerCAmelCase: List[Any] = os.path.join(SCREAMING_SNAKE_CASE , parts[i] ) if i >= len(SCREAMING_SNAKE_CASE ): raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , f'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowerCAmelCase: int = f.readlines() # Now let's find the class / func in the code! __lowerCAmelCase: int = '' __lowerCAmelCase: Tuple = 0 for name in parts[i + 1 :]: while ( line_index < len(SCREAMING_SNAKE_CASE ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(SCREAMING_SNAKE_CASE ): raise ValueError(f''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __lowerCAmelCase: Any = line_index while line_index < len(SCREAMING_SNAKE_CASE ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __lowerCAmelCase: Optional[int] = lines[start_index:line_index] return "".join(SCREAMING_SNAKE_CASE ) _a = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') _a = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''') _a = re.compile(R'''<FILL\s+[^>]*>''') def _a ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __lowerCAmelCase: int = code.split('\n' ) __lowerCAmelCase: Tuple = 0 while idx < len(SCREAMING_SNAKE_CASE ) and len(lines[idx] ) == 0: idx += 1 if idx < len(SCREAMING_SNAKE_CASE ): return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0] return "" def _a ( SCREAMING_SNAKE_CASE : Dict ) -> Tuple: """simple docstring""" __lowerCAmelCase: List[str] = len(get_indent(SCREAMING_SNAKE_CASE ) ) > 0 if has_indent: __lowerCAmelCase: List[Any] = f'''class Bla:\n{code}''' __lowerCAmelCase: Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Dict = black.format_str(SCREAMING_SNAKE_CASE , mode=SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase: Any = style_docstrings_in_code(SCREAMING_SNAKE_CASE ) return result[len('class Bla:\n' ) :] if has_indent else result def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=False ) -> Tuple: """simple docstring""" with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowerCAmelCase: str = f.readlines() __lowerCAmelCase: Any = [] __lowerCAmelCase: Optional[Any] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(SCREAMING_SNAKE_CASE ): __lowerCAmelCase: Any = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: str = search.groups() __lowerCAmelCase: Optional[Any] = find_code_in_diffusers(SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Tuple = get_indent(SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __lowerCAmelCase: Optional[int] = theoretical_indent __lowerCAmelCase: int = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __lowerCAmelCase: Optional[Any] = True while line_index < len(SCREAMING_SNAKE_CASE ) and should_continue: line_index += 1 if line_index >= len(SCREAMING_SNAKE_CASE ): break __lowerCAmelCase: Dict = lines[line_index] __lowerCAmelCase: Union[str, Any] = _should_continue(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and re.search(f'''^{indent}# End copy''' , SCREAMING_SNAKE_CASE ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __lowerCAmelCase: int = lines[start_index:line_index] __lowerCAmelCase: Optional[int] = ''.join(SCREAMING_SNAKE_CASE ) # Remove any nested `Copied from` comments to avoid circular copies __lowerCAmelCase: Optional[int] = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(SCREAMING_SNAKE_CASE ) is None] __lowerCAmelCase: int = '\n'.join(SCREAMING_SNAKE_CASE ) # Before comparing, use the `replace_pattern` on the original code. 
if len(SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase: Optional[int] = replace_pattern.replace('with' , '' ).split(',' ) __lowerCAmelCase: Union[str, Any] = [_re_replace_pattern.search(SCREAMING_SNAKE_CASE ) for p in patterns] for pattern in patterns: if pattern is None: continue __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: str = pattern.groups() __lowerCAmelCase: Dict = re.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if option.strip() == "all-casing": __lowerCAmelCase: Optional[int] = re.sub(obja.lower() , obja.lower() , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[int] = re.sub(obja.upper() , obja.upper() , SCREAMING_SNAKE_CASE ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __lowerCAmelCase: List[Any] = blackify(lines[start_index - 1] + theoretical_code ) __lowerCAmelCase: List[str] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __lowerCAmelCase: str = lines[:start_index] + [theoretical_code] + lines[line_index:] __lowerCAmelCase: Optional[Any] = start_index + 1 if overwrite and len(SCREAMING_SNAKE_CASE ) > 0: # Warn the user a file has been modified. print(f'''Detected changes, rewriting {filename}.''' ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(SCREAMING_SNAKE_CASE ) return diffs def _a ( SCREAMING_SNAKE_CASE : List[Any] = False ) -> List[Any]: """simple docstring""" __lowerCAmelCase: Tuple = glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '**/*.py' ) , recursive=SCREAMING_SNAKE_CASE ) __lowerCAmelCase: List[str] = [] for filename in all_files: __lowerCAmelCase: Union[str, Any] = is_copy_consistent(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase: Optional[Any] = '\n'.join(SCREAMING_SNAKE_CASE ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _a = parser.parse_args() check_copies(args.fix_and_overwrite)
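For context, a sketch of how this checker is typically driven. The per-file function is referred to below by its upstream diffusers name, is_copy_consistent (this dump renames every definition), and the module path is a hypothetical example:

# From the repository root:
#   python utils/check_copies.py                      # report stale "Copied from" blocks
#   python utils/check_copies.py --fix_and_overwrite  # rewrite them in place
# Or programmatically, for a single file (path is a placeholder):
diffs = is_copy_consistent("src/diffusers/models/attention.py", overwrite=False)
print(diffs)  # [] when every "Copied from" block still matches its source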
322
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class __a ( lowerCAmelCase_ ): __lowercase : Optional[int] = 'pix2struct_text_model' __lowercase : Dict = ['past_key_values'] __lowercase : Union[str, Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , lowerCAmelCase__=50_244 , lowerCAmelCase__=768 , lowerCAmelCase__=64 , lowerCAmelCase__=2_048 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=32 , lowerCAmelCase__=128 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=1.0 , lowerCAmelCase__="gelu_new" , lowerCAmelCase__=0 , lowerCAmelCase__=False , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> List[Any]: '''simple docstring''' lowercase__: int = vocab_size lowercase__: Tuple = hidden_size lowercase__: Optional[int] = d_kv lowercase__: Optional[Any] = d_ff lowercase__: Any = num_layers lowercase__: Optional[Any] = num_heads lowercase__: List[str] = relative_attention_num_buckets lowercase__: Optional[Any] = relative_attention_max_distance lowercase__: Tuple = dropout_rate lowercase__: Tuple = layer_norm_epsilon lowercase__: Any = initializer_factor lowercase__: Optional[int] = use_cache lowercase__: Union[str, Any] = eos_token_id lowercase__: Tuple = decoder_start_token_id # for backwards compatibility lowercase__: Any = dense_act_fn super().__init__( pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , is_decoder=__lowerCAmelCase , **__lowerCAmelCase , ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) lowercase__ , lowercase__: str = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('model_type' ) == "pix2struct": lowercase__: Union[str, Any] = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class __a ( lowerCAmelCase_ ): __lowercase : Tuple = 'pix2struct_vision_model' def __init__( self , lowerCAmelCase__=768 , lowerCAmelCase__=768 , lowerCAmelCase__=2_048 , lowerCAmelCase__=64 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__="gelu_new" , lowerCAmelCase__=1E-6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=1.0 , lowerCAmelCase__=4_096 , lowerCAmelCase__=32 , lowerCAmelCase__=128 , **lowerCAmelCase__ , ) -> int: '''simple docstring''' super().__init__(**__lowerCAmelCase ) lowercase__: str = hidden_size lowercase__: Tuple = patch_embed_hidden_size lowercase__: Dict = d_ff lowercase__: str = dropout_rate lowercase__: Union[str, Any] = num_hidden_layers lowercase__: str = num_attention_heads lowercase__: Union[str, Any] = initializer_range lowercase__: str = initializer_factor lowercase__: int = attention_dropout lowercase__: Tuple = layer_norm_eps lowercase__: Tuple = dense_act_fn lowercase__: List[str] = seq_len lowercase__: str = relative_attention_num_buckets lowercase__: Union[str, Any] = relative_attention_max_distance lowercase__: List[Any] = d_kv @classmethod def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str: '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) lowercase__ , lowercase__: Optional[Any] = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('model_type' ) == "pix2struct": lowercase__: Union[str, Any] = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class __a ( lowerCAmelCase_ ): __lowercase : Any = 'pix2struct' __lowercase : Any = True def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=1.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Dict: '''simple docstring''' super().__init__(tie_word_embeddings=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase ) if text_config is None: lowercase__: Optional[Any] = {} logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' ) if vision_config is None: lowercase__: Any = {} logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' 
) lowercase__: List[str] = PixaStructTextConfig(**__lowerCAmelCase ) lowercase__: Optional[Any] = PixaStructVisionConfig(**__lowerCAmelCase ) lowercase__: int = self.text_config.decoder_start_token_id lowercase__: Union[str, Any] = self.text_config.pad_token_id lowercase__: Optional[Any] = self.text_config.eos_token_id lowercase__: Optional[Any] = initializer_factor lowercase__: List[Any] = initializer_range lowercase__: Union[str, Any] = self.initializer_range lowercase__: str = self.initializer_range lowercase__: List[str] = is_vqa @classmethod def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase__: str = copy.deepcopy(self.__dict__ ) lowercase__: int = self.text_config.to_dict() lowercase__: Tuple = self.vision_config.to_dict() lowercase__: Optional[int] = self.__class__.model_type return output
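A short sketch composing the three config classes above, assuming the standard transformers names Pix2StructTextConfig / Pix2StructVisionConfig / Pix2StructConfig (this dump garbles them to PixaStruct*); the layer and head counts are arbitrary example values:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=4, hidden_size=64, d_kv=16, d_ff=128)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=4, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers, config.vision_config.hidden_size)  # 2 64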
196
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
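A hedged usage sketch for the benchmark class above, assuming the public transformers entry points TensorFlowBenchmark / TensorFlowBenchmarkArguments that it plugs into; the model name, batch size, and sequence length are examples only:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # speed/memory results per model, batch size and sequence length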
289
0
"""Conversion between metric length units (meter through yottametre)."""

UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between two metric length units,
    e.g. length_conversion(4, "kilometer", "meter") == 4000.0."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    if to_sanitized not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
85
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) if is_vision_available(): import PIL class lowercase ( lowerCAmelCase_ ): _SCREAMING_SNAKE_CASE = ['pixel_values'] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> int: super().__init__(**__lowerCAmelCase ) lowerCAmelCase = size if size is not None else {"""shortest_edge""": 224} lowerCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name="""crop_size""" ) lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = resample lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size lowerCAmelCase = do_rescale lowerCAmelCase = rescale_factor lowerCAmelCase = do_normalize lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase = do_convert_rgb def _snake_case ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> int: lowerCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCAmelCase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase ) return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _snake_case ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> str: lowerCAmelCase = get_size_dict(__lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _snake_case ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Union[str, Any]: return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> List[Any]: return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def _snake_case ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> Union[str, Any]: lowerCAmelCase = do_resize if do_resize is not None else self.do_resize lowerCAmelCase = size if size is not None else self.size lowerCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""size""" , default_to_square=__lowerCAmelCase ) lowerCAmelCase = resample if resample is not None else self.resample lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase = crop_size if crop_size is not None else self.crop_size lowerCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" , default_to_square=__lowerCAmelCase ) lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase = image_mean if image_mean is not None else self.image_mean lowerCAmelCase = image_std if image_std is not None else self.image_std lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase = make_list_of_images(__lowerCAmelCase ) if not valid_images(__lowerCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase = [convert_to_rgb(__lowerCAmelCase ) for image in images] # All transformations expect numpy arrays. 
lowerCAmelCase = [to_numpy_array(__lowerCAmelCase ) for image in images] if do_resize: lowerCAmelCase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images] if do_center_crop: lowerCAmelCase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images] if do_rescale: lowerCAmelCase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images] if do_normalize: lowerCAmelCase = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images] lowerCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images] lowerCAmelCase = {"""pixel_values""": images} return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
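This processor mirrors the CLIP preprocessing pipeline (note the OPENAI_CLIP_MEAN/STD defaults); a minimal sketch using the public CLIPImageProcessor, which follows the same code path, with a placeholder image:

from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = Image.new("RGB", (640, 480))  # placeholder image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])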
46
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
0
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a : def __init__( self :Any ,__lowercase :int ,__lowercase :Union[str, Any]=1_3 ,__lowercase :int=3_0 ,__lowercase :str=2 ,__lowercase :int=3 ,__lowercase :Dict=True ,__lowercase :Tuple=True ,__lowercase :List[Any]=3_2 ,__lowercase :Tuple=2 ,__lowercase :Dict=4 ,__lowercase :int=3_7 ,__lowercase :Dict="gelu" ,__lowercase :Dict=0.1 ,__lowercase :Dict=0.1 ,__lowercase :Optional[int]=1_0 ,__lowercase :Tuple=0.02 ,__lowercase :Optional[int]=3 ,__lowercase :List[Any]=None ,): snake_case__ : List[str] = parent snake_case__ : Optional[Any] = batch_size snake_case__ : List[str] = image_size snake_case__ : Optional[Any] = patch_size snake_case__ : Optional[int] = num_channels snake_case__ : Optional[Any] = is_training snake_case__ : List[str] = use_labels snake_case__ : Union[str, Any] = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : Any = num_attention_heads snake_case__ : List[str] = intermediate_size snake_case__ : str = hidden_act snake_case__ : Tuple = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : Optional[int] = type_sequence_label_size snake_case__ : List[str] = initializer_range snake_case__ : str = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : List[str] = (image_size // patch_size) ** 2 snake_case__ : Optional[Any] = num_patches + 1 def __lowerCamelCase ( self :Dict ): snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : int = None if self.use_labels: snake_case__ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self :str ): return ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,) def __lowerCamelCase ( self :List[Any] ,__lowercase :List[str] ,__lowercase :Any ,__lowercase :Tuple ): snake_case__ : Dict = TFViTModel(config=__lowerCAmelCase ) snake_case__ : List[Any] = model(__lowerCAmelCase ,training=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
snake_case__ : int = self.image_size // 2 snake_case__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size] snake_case__ : Dict = model(__lowerCAmelCase ,interpolate_pos_encoding=__lowerCAmelCase ,training=__lowerCAmelCase ) snake_case__ : List[Any] = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Optional[Any] ,__lowercase :int ,__lowercase :str ,__lowercase :str ): snake_case__ : Tuple = self.type_sequence_label_size snake_case__ : List[str] = TFViTForImageClassification(__lowerCAmelCase ) snake_case__ : Dict = model(__lowerCAmelCase ,labels=__lowerCAmelCase ,training=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. snake_case__ : Optional[int] = self.image_size // 2 snake_case__ : List[Any] = pixel_values[:, :, :image_size, :image_size] snake_case__ : Tuple = model(__lowerCAmelCase ,interpolate_pos_encoding=__lowerCAmelCase ,training=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ : Any = 1 snake_case__ : Dict = TFViTForImageClassification(__lowerCAmelCase ) snake_case__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ : Optional[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self :List[Any] ): snake_case__ : Tuple = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : int = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () __lowerCAmelCase : int = ( {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification} if is_tf_available() else {} ) __lowerCAmelCase : Tuple = False __lowerCAmelCase : Optional[Any] = False __lowerCAmelCase : List[str] = False def __lowerCamelCase ( self :Any ): snake_case__ : Optional[int] = TFViTModelTester(self ) snake_case__ : List[Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=3_7 ) def __lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCamelCase ( self :Optional[int] ): pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCamelCase ( self :Any ): pass def __lowerCamelCase ( self :List[str] ): snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[str] = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) snake_case__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase ,tf.keras.layers.Layer ) ) def __lowerCamelCase ( self :Dict ): snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = model_class(__lowerCAmelCase ) snake_case__ : Any = 
inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : List[str] = [*signature.parameters.keys()] snake_case__ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,__lowerCAmelCase ) def __lowerCamelCase ( self :Any ): snake_case__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self :Dict ): snake_case__ : int = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__lowerCAmelCase ) def _lowerCAmelCase ( ) -> Any: """simple docstring""" snake_case__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class a ( unittest.TestCase ): @cached_property def __lowerCamelCase ( self :Dict ): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def __lowerCamelCase ( self :Any ): snake_case__ : Any = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) snake_case__ : int = self.default_image_processor snake_case__ : List[str] = prepare_img() snake_case__ : Optional[Any] = image_processor(images=__lowerCAmelCase ,return_tensors='''tf''' ) # forward pass snake_case__ : Any = model(**__lowerCAmelCase ) # verify the logits snake_case__ : List[str] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape ,__lowerCAmelCase ) snake_case__ : Optional[Any] = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 )
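For context, the slow integration test above reduces to a standard inference call. A minimal sketch, assuming tensorflow, transformers and Pillow are installed; the image path and checkpoint id are taken from the test itself:

# Minimal sketch of what the slow integration test above exercises.
from PIL import Image
from transformers import TFViTForImageClassification, ViTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test
print(logits.shape)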
230
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
0
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) lowerCamelCase__ : List[str] = ['''model.decoder.embed_positions.weights'''] def UpperCamelCase ( _lowerCAmelCase : Optional[Any] ) -> Tuple: if "emb" in name: _UpperCAmelCase : Tuple = name.replace("""emb""", """model.decoder.embed_tokens""" ) if "transformer" in name: _UpperCAmelCase : Optional[Any] = name.replace("""transformer""", """model.decoder""" ) if "cross_attention" in name: _UpperCAmelCase : int = name.replace("""cross_attention""", """encoder_attn""" ) if "linear1" in name: _UpperCAmelCase : Optional[int] = name.replace("""linear1""", """fc1""" ) if "linear2" in name: _UpperCAmelCase : Optional[int] = name.replace("""linear2""", """fc2""" ) if "norm1" in name: _UpperCAmelCase : List[Any] = name.replace("""norm1""", """self_attn_layer_norm""" ) if "norm_cross" in name: _UpperCAmelCase : Tuple = name.replace("""norm_cross""", """encoder_attn_layer_norm""" ) if "norm2" in name: _UpperCAmelCase : Dict = name.replace("""norm2""", """final_layer_norm""" ) if "out_norm" in name: _UpperCAmelCase : List[str] = name.replace("""out_norm""", """model.decoder.layer_norm""" ) if "linears" in name: _UpperCAmelCase : List[str] = name.replace("""linears""", """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _UpperCAmelCase : str = name.replace("""condition_provider.conditioners.description.output_proj""", """enc_to_dec_proj""" ) return name def UpperCamelCase ( _lowerCAmelCase : Tuple, _lowerCAmelCase : List[Any] ) -> List[Any]: _UpperCAmelCase : Dict = list(state_dict.keys() ) _UpperCAmelCase : List[Any] = {} for key in keys: _UpperCAmelCase : str = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : List[Any] = rename_keys(_lowerCAmelCase ) if "in_proj_weight" in key: # split fused qkv proj _UpperCAmelCase : int = val[:hidden_size, :] _UpperCAmelCase : Union[str, Any] = val[hidden_size : 2 * hidden_size, :] _UpperCAmelCase : Any = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _UpperCAmelCase : Any = val else: _UpperCAmelCase : Optional[Any] = val return state_dict, enc_dec_proj_state_dict def UpperCamelCase ( _lowerCAmelCase : Dict ) -> Union[str, Any]: if checkpoint == "small": # default config values _UpperCAmelCase : Tuple = 1024 _UpperCAmelCase : List[str] = 24 _UpperCAmelCase : Any = 16 elif checkpoint == "medium": _UpperCAmelCase : int = 1536 _UpperCAmelCase : int = 48 _UpperCAmelCase : List[str] = 24 elif checkpoint == "large": _UpperCAmelCase : Tuple = 2048 _UpperCAmelCase : int = 48 _UpperCAmelCase : List[str] = 32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) _UpperCAmelCase : Dict = MusicgenDecoderConfig( hidden_size=_lowerCAmelCase, ffn_dim=hidden_size * 4, num_hidden_layers=_lowerCAmelCase, num_attention_heads=_lowerCAmelCase, ) return config @torch.no_grad() def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : List[str]=None, _lowerCAmelCase : Union[str, Any]=None, _lowerCAmelCase : List[Any]="cpu" ) -> Union[str, 
Any]: _UpperCAmelCase : List[Any] = MusicGen.get_pretrained(_lowerCAmelCase, device=_lowerCAmelCase ) _UpperCAmelCase : Any = decoder_config_from_checkpoint(_lowerCAmelCase ) _UpperCAmelCase : List[str] = fairseq_model.lm.state_dict() _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = rename_state_dict( _lowerCAmelCase, hidden_size=decoder_config.hidden_size ) _UpperCAmelCase : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _UpperCAmelCase : str = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _UpperCAmelCase : Any = MusicgenForCausalLM(_lowerCAmelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _UpperCAmelCase , _UpperCAmelCase : Dict = decoder.load_state_dict(_lowerCAmelCase, strict=_lowerCAmelCase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(_lowerCAmelCase ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model _UpperCAmelCase : str = MusicgenForConditionalGeneration(text_encoder=_lowerCAmelCase, audio_encoder=_lowerCAmelCase, decoder=_lowerCAmelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_lowerCAmelCase ) # check we can do a forward pass _UpperCAmelCase : List[Any] = torch.arange(0, 8, dtype=torch.long ).reshape(2, -1 ) _UpperCAmelCase : List[str] = input_ids.reshape(2 * 4, -1 ) with torch.no_grad(): _UpperCAmelCase : List[Any] = model(input_ids=_lowerCAmelCase, decoder_input_ids=_lowerCAmelCase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("""t5-base""" ) _UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""", padding_side="""left""" ) _UpperCAmelCase : Any = MusicgenProcessor(feature_extractor=_lowerCAmelCase, tokenizer=_lowerCAmelCase ) # set the appropriate bos/pad token ids _UpperCAmelCase : Optional[Any] = 2048 _UpperCAmelCase : str = 2048 # set other default generation config params _UpperCAmelCase : int = int(30 * audio_encoder.config.frame_rate ) _UpperCAmelCase : List[Any] = True _UpperCAmelCase : int = 3.0 if pytorch_dump_folder is not None: Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(_lowerCAmelCase ) processor.push_to_hub(_lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. 
Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCamelCase__ : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
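For reference, the converter above is normally driven through its argparse block; a sketch of calling the entry point directly instead, assuming the module is importable and using a placeholder output directory:

# Hypothetical direct invocation of the entry point defined above.
# Equivalent CLI: python <this_script>.py --checkpoint small --pytorch_dump_folder ./musicgen-small
convert_musicgen_checkpoint("small", "./musicgen-small")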
246
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
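For context, a minimal sketch of the processor class under test; the checkpoint id is an assumption (any MobileViT checkpoint with a preprocessor config would do):

from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
pixel_values = processor(images=Image.new("RGB", (64, 48)), return_tensors="pt").pixel_values
print(pixel_values.shape)  # (1, 3, crop_height, crop_width)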
289
0
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(theta) (Malus' law)."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
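A quick worked check of the formula, with input values chosen for the example: at 0 degrees the full intensity passes, while at 60 degrees only cos^2(60) = 0.25 of it does.

assert malus_law(100.0, 0) == 100.0             # cos(0)^2 == 1
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9  # cos(60 deg)^2 == 0.25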
164
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
289
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__) def a_ ( lowerCamelCase , lowerCamelCase=False ): UpperCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase__ = '' else: UpperCAmelCase__ = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase__ = in_proj_bias[: config.hidden_size] UpperCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase__ = in_proj_bias[-config.hidden_size :] def a_ ( lowerCamelCase ): UpperCAmelCase__ = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowerCamelCase , lowerCamelCase ) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = dct.pop(lowerCamelCase ) UpperCAmelCase__ = val def a_ ( ): UpperCAmelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCAmelCase__ = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = ViTConfig() UpperCAmelCase__ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": UpperCAmelCase__ = True UpperCAmelCase__ = int(vit_name[-1_2:-1_0] ) UpperCAmelCase__ = int(vit_name[-9:-6] ) else: UpperCAmelCase__ = 1_0_0_0 UpperCAmelCase__ = 'huggingface/label-files' UpperCAmelCase__ = 'imagenet-1k-id2label.json' UpperCAmelCase__ = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) ) UpperCAmelCase__ = {int(lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase__ = idalabel UpperCAmelCase__ = {v: k for k, v in idalabel.items()} UpperCAmelCase__ = int(vit_name[-6:-4] ) UpperCAmelCase__ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('tiny' ): UpperCAmelCase__ = 1_9_2 UpperCAmelCase__ = 7_6_8 UpperCAmelCase__ = 1_2 UpperCAmelCase__ = 3 elif vit_name[9:].startswith('small' ): UpperCAmelCase__ = 3_8_4 UpperCAmelCase__ = 1_5_3_6 UpperCAmelCase__ = 1_2 UpperCAmelCase__ = 6 else: pass else: if vit_name[4:].startswith('small' ): UpperCAmelCase__ = 7_6_8 UpperCAmelCase__ = 2_3_0_4 UpperCAmelCase__ = 8 UpperCAmelCase__ = 8 elif vit_name[4:].startswith('base' ): pass elif vit_name[4:].startswith('large' ): UpperCAmelCase__ = 1_0_2_4 UpperCAmelCase__ = 4_0_9_6 UpperCAmelCase__ = 2_4 UpperCAmelCase__ = 1_6 elif vit_name[4:].startswith('huge' ): UpperCAmelCase__ = 1_2_8_0 UpperCAmelCase__ = 5_1_2_0 UpperCAmelCase__ = 3_2 UpperCAmelCase__ = 1_6 # load original model from timm UpperCAmelCase__ = timm.create_model(lowerCamelCase , pretrained=lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase__ = timm_model.state_dict() if base_model: remove_classification_head_(lowerCamelCase ) UpperCAmelCase__ = create_rename_keys(lowerCamelCase , lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase ) read_in_q_k_v(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": UpperCAmelCase__ = ViTModel(lowerCamelCase ).eval() else: UpperCAmelCase__ = ViTForImageClassification(lowerCamelCase ).eval() model.load_state_dict(lowerCamelCase ) # Check outputs on an image, 
prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: UpperCAmelCase__ = DeiTImageProcessor(size=config.image_size ) else: UpperCAmelCase__ = ViTImageProcessor(size=config.image_size ) UpperCAmelCase__ = image_processor(images=prepare_img() , return_tensors='pt' ) UpperCAmelCase__ = encoding['pixel_values'] UpperCAmelCase__ = model(lowerCamelCase ) if base_model: UpperCAmelCase__ = timm_model.forward_features(lowerCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowerCamelCase , outputs.pooler_output , atol=1e-3 ) else: UpperCAmelCase__ = timm_model(lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCamelCase , outputs.logits , atol=1e-3 ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase__ : Union[str, Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
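For reference, a sketch of driving the converter above directly rather than through argparse; the output directory is a placeholder:

# Hypothetical direct call. Equivalent CLI:
#   python <this_script>.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base
convert_vit_checkpoint("vit_base_patch16_224", "./vit-base")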
98
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
289
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
206
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
0
"""simple docstring""" import math def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[str] = 0 snake_case_ :List[Any] = 0 while num > 0: snake_case_ :List[Any] = num % 8 snake_case_ :int = octal + (remainder * math.floor(math.pow(10, _lowercase ) )) counter += 1 snake_case_ :Dict = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return f"""0o{int(_lowercase )}""" def A_ ( ): '''simple docstring''' print("""\n2 in octal is:""" ) print(decimal_to_octal(2 ) ) # = 2 print("""\n8 in octal is:""" ) print(decimal_to_octal(8 ) ) # = 10 print("""\n65 in octal is:""" ) print(decimal_to_octal(65 ) ) # = 101 print("""\n216 in octal is:""" ) print(decimal_to_octal(216 ) ) # = 330 print("""\n512 in octal is:""" ) print(decimal_to_octal(512 ) ) # = 1000 print("""\n""" ) if __name__ == "__main__": main()
66
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
0
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
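A minimal sketch of instantiating the config above through the installed library; the derived layer count follows from the property defined at the bottom of the class:

from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig()   # library defaults shown above
print(config.num_hidden_layers)  # num_encoder_layers + num_decoder_layers == 24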
97
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _a = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) _a = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) _a = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) _a = ( ('''2H 3H 4H 
5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) _a = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) _a = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) _a = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def _a ( ) -> List[str]: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase: Tuple = randrange(len(SCREAMING_SNAKE_CASE ) ), randrange(len(SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase: Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] = 1_00 ) -> Union[str, Any]: """simple docstring""" return (generate_random_hand() for _ in range(SCREAMING_SNAKE_CASE )) @pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase: Dict = PokerHand(SCREAMING_SNAKE_CASE ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]: """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ) -> Tuple: """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE ).compare_with(PokerHand(SCREAMING_SNAKE_CASE ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE ).compare_with(PokerHand(SCREAMING_SNAKE_CASE ) ) == expected def _a ( ) -> Optional[Any]: """simple 
docstring""" __lowerCAmelCase: List[str] = [PokerHand(SCREAMING_SNAKE_CASE ) for hand in SORTED_HANDS] __lowerCAmelCase: List[Any] = poker_hands.copy() shuffle(SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[int] = chain(sorted(SCREAMING_SNAKE_CASE ) ) for index, hand in enumerate(SCREAMING_SNAKE_CASE ): assert hand == poker_hands[index] def _a ( ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: Tuple = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=SCREAMING_SNAKE_CASE ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _a ( ) -> Any: """simple docstring""" __lowerCAmelCase: str = PokerHand('2C 4S AS 3D 5C' ) __lowerCAmelCase: Dict = True __lowerCAmelCase: int = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _a ( ) -> Any: """simple docstring""" __lowerCAmelCase: Tuple = 0 __lowerCAmelCase: int = os.path.abspath(os.path.dirname(SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase: Tuple = os.path.join(SCREAMING_SNAKE_CASE , 'poker_hands.txt' ) with open(SCREAMING_SNAKE_CASE ) as file_hand: for line in file_hand: __lowerCAmelCase: Optional[int] = line[:14].strip() __lowerCAmelCase: Tuple = line[15:].strip() __lowerCAmelCase , __lowerCAmelCase: Dict = PokerHand(SCREAMING_SNAKE_CASE ), PokerHand(SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Tuple = player.compare_with(SCREAMING_SNAKE_CASE ) if output == "Win": answer += 1 assert answer == 3_76
322
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
0
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __a ( lowerCAmelCase_ , unittest.TestCase ): __lowercase : Dict = KandinskyVaaPriorPipeline __lowercase : List[Any] = ['prompt'] __lowercase : Optional[Any] = ['prompt', 'negative_prompt'] __lowercase : Union[str, Any] = [ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] __lowercase : Union[str, Any] = False @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' return self.time_input_dim @property def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' return 100 @property def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) lowercase__: Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(__lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase__: Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } lowercase__: Optional[int] = PriorTransformer(**__lowerCAmelCase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 lowercase__: List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) lowercase__: List[str] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) lowercase__: Optional[int] = CLIPVisionModelWithProjection(__lowerCAmelCase ) return model @property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: Tuple = CLIPImageProcessor( crop_size=224 , do_center_crop=__lowerCAmelCase , do_normalize=__lowerCAmelCase , do_resize=__lowerCAmelCase , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' 
lowercase__: List[str] = self.dummy_prior lowercase__: Any = self.dummy_image_encoder lowercase__: Union[str, Any] = self.dummy_text_encoder lowercase__: Tuple = self.dummy_tokenizer lowercase__: int = self.dummy_image_processor lowercase__: int = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_000 , clip_sample=__lowerCAmelCase , clip_sample_range=1_0.0 , ) lowercase__: Union[str, Any] = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[Any]: '''simple docstring''' if str(__lowerCAmelCase ).startswith('mps' ): lowercase__: List[str] = torch.manual_seed(__lowerCAmelCase ) else: lowercase__: Optional[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) lowercase__: Any = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' lowercase__: Union[str, Any] = 'cpu' lowercase__: Any = self.get_dummy_components() lowercase__: Optional[Any] = self.pipeline_class(**__lowerCAmelCase ) lowercase__: Union[str, Any] = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) lowercase__: Optional[int] = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) lowercase__: int = output.image_embeds lowercase__: Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0] lowercase__: List[str] = image[0, -10:] lowercase__: List[Any] = image_from_tuple[0, -10:] assert image.shape == (1, 32) lowercase__: int = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: List[Any] = torch_device == 'cpu' lowercase__: str = True lowercase__: Tuple = False self._test_inference_batch_single_identical( test_max_difference=__lowerCAmelCase , relax_max_difference=__lowerCAmelCase , test_mean_pixel_difference=__lowerCAmelCase , ) @skip_mps def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' lowercase__: int = torch_device == 'cpu' lowercase__: Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__lowerCAmelCase , test_mean_pixel_difference=__lowerCAmelCase , )
196
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
289
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # RoCBert has no fast tokenizer to register

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer imports for RoCBert

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
0
"""simple docstring""" import re def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ): '''simple docstring''' lowerCAmelCase = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = "0094702343221" print(is_sri_lankan_phone_number(phone))
46
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
289
0
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( lowerCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Tuple = DiTPipeline __lowerCAmelCase : int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __lowerCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __lowerCAmelCase : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __lowerCAmelCase : int = False def __lowerCamelCase ( self :str ): torch.manual_seed(0 ) snake_case__ : str = TransformeraDModel( sample_size=1_6 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=__lowerCAmelCase ,activation_fn='''gelu-approximate''' ,num_embeds_ada_norm=1_0_0_0 ,norm_type='''ada_norm_zero''' ,norm_elementwise_affine=__lowerCAmelCase ,) snake_case__ : List[str] = AutoencoderKL() snake_case__ : List[Any] = DDIMScheduler() snake_case__ : List[str] = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def __lowerCamelCase ( self :Tuple ,__lowercase :Any ,__lowercase :List[Any]=0 ): if str(__lowerCAmelCase ).startswith('''mps''' ): snake_case__ : str = torch.manual_seed(__lowerCAmelCase ) else: snake_case__ : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) snake_case__ : Tuple = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCamelCase ( self :Any ): snake_case__ : Union[str, Any] = '''cpu''' snake_case__ : Dict = self.get_dummy_components() snake_case__ : Dict = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) snake_case__ : List[str] = self.get_dummy_inputs(__lowerCAmelCase ) snake_case__ : Optional[int] = pipe(**__lowerCAmelCase ).images snake_case__ : str = image[0, -3:, -3:, -1] self.assertEqual(image.shape ,(1, 1_6, 1_6, 3) ) snake_case__ : Any = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) snake_case__ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCAmelCase ,1e-3 ) def __lowerCamelCase ( self :Optional[int] ): self._test_inference_batch_single_identical(relax_max_difference=__lowerCAmelCase ,expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def __lowerCamelCase ( self :Optional[int] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class a ( unittest.TestCase ): def __lowerCamelCase ( self :int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self :List[Any] ): snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Any = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) snake_case__ : 
Optional[Any] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] snake_case__ : Tuple = pipe.get_label_ids(__lowerCAmelCase ) snake_case__ : Optional[int] = pipe(__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=4_0 ,output_type='''np''' ).images for word, image in zip(__lowerCAmelCase ,__lowerCAmelCase ): snake_case__ : Optional[int] = load_numpy( F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def __lowerCamelCase ( self :int ): snake_case__ : int = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) snake_case__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) snake_case__ : List[Any] = ['''vase''', '''umbrella'''] snake_case__ : List[str] = pipe.get_label_ids(__lowerCAmelCase ) snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Tuple = pipe(__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=2_5 ,output_type='''np''' ).images for word, image in zip(__lowerCAmelCase ,__lowerCAmelCase ): snake_case__ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
230
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
289
0
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def UpperCamelCase ( _lowerCAmelCase : Dict, _lowerCAmelCase : Dict, _lowerCAmelCase : Dict ) -> Dict: # Construct model if gpta_config_file == "": _UpperCAmelCase : Any = GPTaConfig() else: _UpperCAmelCase : Any = GPTaConfig.from_json_file(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = GPTaModel(_lowerCAmelCase ) # Load weights from numpy load_tf_weights_in_gpta(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) # Save pytorch-model _UpperCAmelCase : List[str] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME _UpperCAmelCase : str = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict(), _lowerCAmelCase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_lowerCAmelCase, """w""", encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCamelCase__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) lowerCamelCase__ : List[str] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
246
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
0
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def _A ( lowercase__ ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _A ( lowercase__ ): # word like '180' or '身高' or '神' for char in word: lowercase__ = ord(lowercase__ ) if not _is_chinese_char(lowercase__ ): return 0 return 1 def _A ( lowercase__ ): lowercase__ = set() for token in tokens: lowercase__ = len(lowercase__ ) > 1 and is_chinese(lowercase__ ) if chinese_word: word_set.add(lowercase__ ) lowercase__ = list(lowercase__ ) return word_list def _A ( lowercase__ , lowercase__ ): if not chinese_word_set: return bert_tokens lowercase__ = max([len(lowercase__ ) for w in chinese_word_set] ) lowercase__ = bert_tokens lowercase__ , lowercase__ = 0, len(lowercase__ ) while start < end: lowercase__ = True if is_chinese(bert_word[start] ): lowercase__ = min(end - start , lowercase__ ) for i in range(lowercase__ , 1 , -1 ): lowercase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowercase__ = """##""" + bert_word[j] lowercase__ = start + i lowercase__ = False break if single_word: start += 1 return bert_word def _A ( lowercase__ , lowercase__ , lowercase__ ): lowercase__ = [] for i in range(0 , len(lowercase__ ) , 100 ): lowercase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowercase__ = [get_chinese_word(lowercase__ ) for r in res] ltp_res.extend(lowercase__ ) assert len(lowercase__ ) == len(lowercase__ ) lowercase__ = [] for i in range(0 , len(lowercase__ ) , 100 ): lowercase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowercase__ , truncation=lowercase__ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(lowercase__ ) == len(lowercase__ ) lowercase__ = [] for input_ids, chinese_word in zip(lowercase__ , lowercase__ ): lowercase__ = [] for id in input_ids: lowercase__ = bert_tokenizer._convert_id_to_token(lowercase__ ) input_tokens.append(lowercase__ ) lowercase__ = add_sub_symbol(lowercase__ , lowercase__ ) lowercase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowercase__ ): if token[:2] == "##": lowercase__ = token[2:] # save chinese tokens' pos if len(lowercase__ ) == 1 and _is_chinese_char(ord(lowercase__ ) ): ref_id.append(lowercase__ ) ref_ids.append(lowercase__ ) assert len(lowercase__ ) == len(lowercase__ ) return ref_ids def _A ( lowercase__ ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowercase__ = f.readlines() lowercase__ = [line.strip() for line in data if len(lowercase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowercase__ = LTP(args.ltp ) # faster in GPU device lowercase__ = BertTokenizer.from_pretrained(args.bert ) lowercase__ = prepare_ref(lowercase__ , lowercase__ , lowercase__ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowercase__ = [json.dumps(lowercase__ ) + """\n""" for ref in ref_ids] f.writelines(lowercase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) __A = parser.parse_args() main(args)
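# Example invocation of the whole-word-masking reference script above, using
# its own argparse defaults (the script file name is a placeholder):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt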
164
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase__ : Any = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class snake_case ( lowerCAmelCase_ ): """simple docstring""" snake_case__ = 'beit' def __init__( self : Optional[Any] ,lowerCamelCase__ : Any=8_192 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Union[str, Any]=3_072 ,lowerCamelCase__ : List[str]="gelu" ,lowerCamelCase__ : Dict=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : Optional[int]=0.0_2 ,lowerCamelCase__ : int=1e-12 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Any=False ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : Optional[int]=[3, 5, 7, 11] ,lowerCamelCase__ : Tuple=[1, 2, 3, 6] ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Optional[int]=0.4 ,lowerCamelCase__ : List[Any]=256 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : List[Any]=255 ,**lowerCamelCase__ : List[str] ,): super().__init__(**__lowerCAmelCase ) UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = image_size UpperCAmelCase__ = patch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = use_mask_token UpperCAmelCase__ = use_absolute_position_embeddings UpperCAmelCase__ = use_relative_position_bias UpperCAmelCase__ = use_shared_relative_position_bias UpperCAmelCase__ = layer_scale_init_value UpperCAmelCase__ = drop_path_rate UpperCAmelCase__ = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase__ = out_indices UpperCAmelCase__ = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase__ = use_auxiliary_head UpperCAmelCase__ = auxiliary_loss_weight UpperCAmelCase__ = auxiliary_channels UpperCAmelCase__ = auxiliary_num_convs UpperCAmelCase__ = auxiliary_concat_input UpperCAmelCase__ = semantic_loss_ignore_index class snake_case ( lowerCAmelCase_ ): """simple docstring""" snake_case__ = version.parse("1.11" ) @property def __lowerCAmelCase ( self : Tuple ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowerCAmelCase ( self : Optional[Any] ): return 1e-4
98
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
0
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ): A_ : int = parent A_ : Dict = batch_size A_ : str = seq_length A_ : List[Any] = is_training A_ : List[str] = use_input_mask A_ : Optional[int] = use_token_type_ids A_ : Optional[int] = use_labels A_ : int = vocab_size A_ : Tuple = hidden_size A_ : List[Any] = num_hidden_layers A_ : int = num_attention_heads A_ : List[str] = intermediate_size A_ : Dict = hidden_act A_ : List[str] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : int = max_position_embeddings A_ : Optional[int] = type_vocab_size A_ : Dict = type_sequence_label_size A_ : Tuple = initializer_range A_ : List[Any] = num_labels A_ : Optional[int] = num_choices A_ : str = scope def _a (self ): A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Any = None if self.use_input_mask: A_ : str = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Union[str, Any] = None if self.use_token_type_ids: A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Optional[int] = None A_ : Optional[Any] = None A_ : str = None if self.use_labels: A_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : Optional[int] = BioGptModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A_ : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) A_ : Any = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a (self , 
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ): A_ : Optional[Any] = BioGptForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A_ : str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ): A_ : Optional[int] = BioGptModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # create attention mask A_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCAmelCase ) A_ : Tuple = self.seq_length // 2 A_ : List[Any] = 0 # first forward pass A_, A_ : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids A_ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids A_ : List[Any] = ids_tensor((1,) , __lowerCAmelCase ).item() + 1 A_ : Dict = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) A_ : List[str] = random_other_next_tokens # append to next input_ids and attn_mask A_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) A_ : List[str] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__lowerCAmelCase )] , dim=1 , ) # get two different outputs A_ : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )["""last_hidden_state"""] A_ : Any = model(__lowerCAmelCase , past_key_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )["""last_hidden_state"""] # select random slice A_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() A_ : str = output_from_no_past[:, -1, random_slice_idx].detach() A_ : Any = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ): A_ : List[str] = BioGptModel(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval() A_ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCAmelCase ) # first forward pass A_ : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase ) A_, A_ : Union[str, Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids A_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : Dict = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and A_ : int = torch.cat([input_ids, next_tokens] , dim=-1 ) A_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) A_ : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )["""last_hidden_state"""] A_ : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[ """last_hidden_state""" ] # select random slice A_ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() A_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCAmelCase , 
__lowerCAmelCase , atol=1E-3 ) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ): A_ : Any = BioGptForCausalLM(__lowerCAmelCase ) model.to(__lowerCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() A_ : Dict = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def _a (self , lowercase , *lowercase ): A_ : str = BioGptModel(__lowerCAmelCase ) A_ : Optional[int] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ): A_ : Optional[int] = self.num_labels A_ : Optional[int] = BioGptForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A_ : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a (self ): A_ : str = self.prepare_config_and_inputs() ( ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ) : Dict = config_and_inputs A_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : Tuple = (BioGptForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : int = False def _a (self ): A_ : int = BioGptModelTester(self ) A_ : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _a (self ): A_ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : Dict = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _a (self ): A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__lowerCAmelCase ) def _a (self ): A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__lowerCAmelCase , gradient_checkpointing=__lowerCAmelCase ) def _a (self ): A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__lowerCAmelCase ) def _a (self ): A_ : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_weight_initialization(*__lowerCAmelCase ) def _a (self ): A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__lowerCAmelCase ) @slow def _a (self ): A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(__lowerCAmelCase ) A_ : Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A_ : List[str] = """left""" # Define PAD Token = EOS Token = 50256 A_ : int = tokenizer.eos_token A_ : str = model.config.eos_token_id # use different length sentences to test batching A_ : Tuple = [ """Hello, my dog is a little""", """Today, I""", ] A_ : List[str] = tokenizer(__lowerCAmelCase , return_tensors="""pt""" , padding=__lowerCAmelCase ) A_ : Any = inputs["""input_ids"""].to(__lowerCAmelCase ) A_ : Optional[int] = model.generate( input_ids=__lowerCAmelCase , attention_mask=inputs["""attention_mask"""].to(__lowerCAmelCase ) , ) A_ : Optional[Any] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(__lowerCAmelCase ) A_ : Optional[int] = model.generate(input_ids=__lowerCAmelCase ) A_ : Tuple = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() A_ : List[Any] = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(__lowerCAmelCase ) A_ : Union[str, Any] = model.generate(input_ids=__lowerCAmelCase , max_length=model.config.max_length - num_paddings ) A_ : Any = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) A_ : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCAmelCase ) A_ : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCAmelCase ) A_ : Any = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def _a (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : int = BioGptModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def _a (self ): A_, A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = 3 A_ : Tuple = input_dict["""input_ids"""] A_ : List[Any] = input_ids.ne(1 ).to(__lowerCAmelCase ) A_ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A_ : Optional[int] = BioGptForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A_ : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a (self ): A_, A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = 3 A_ : List[Any] = """multi_label_classification""" A_ : Dict = input_dict["""input_ids"""] A_ : Any = input_ids.ne(1 ).to(__lowerCAmelCase ) A_ : Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : List[str] = BioGptForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A_ : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) 
@require_torch class _lowerCAmelCase ( unittest.TestCase ): @slow def _a (self ): A_ : Optional[int] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) A_ : Union[str, Any] = torch.tensor([[2, 4805, 9, 656, 21]] ) A_ : Optional[Any] = model(__lowerCAmelCase )[0] A_ : List[Any] = 42384 A_ : Union[str, Any] = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __lowerCAmelCase ) A_ : Tuple = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def _a (self ): A_ : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A_ : str = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(__lowerCAmelCase ) torch.manual_seed(0 ) A_ : str = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(__lowerCAmelCase ) A_ : int = model.generate( **__lowerCAmelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=__lowerCAmelCase , ) A_ : Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCAmelCase ) A_ : Dict = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
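The slow test above exercises batched generation with left padding. A minimal, hedged sketch of the same usage pattern (downloads microsoft/biogpt; not run here):

from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

# BioGPT defines no pad token, so reuse EOS and pad on the left for batched decoding.
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id

inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))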
206
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
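The metric delegates to scikit-learn, so Example 1 above can be reproduced directly: the references contain three positives, of which two are predicted correctly (TP=2, FN=1), giving recall 2/3.

from sklearn.metrics import recall_score

print(recall_score([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]))  # 0.6666666666666666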
289
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCamelCase ( lowerCAmelCase_ ): '''simple docstring''' _A : Dict = 'open-llama' def __init__( self: int , snake_case: Any=100_000 , snake_case: Union[str, Any]=4_096 , snake_case: List[str]=11_008 , snake_case: Optional[Any]=32 , snake_case: List[Any]=32 , snake_case: str="silu" , snake_case: List[Any]=2_048 , snake_case: Tuple=0.0_2 , snake_case: Optional[int]=1E-6 , snake_case: Tuple=True , snake_case: int=0 , snake_case: Optional[Any]=1 , snake_case: Union[str, Any]=2 , snake_case: Dict=False , snake_case: Dict=True , snake_case: List[Any]=0.1 , snake_case: Dict=0.1 , snake_case: List[str]=True , snake_case: str=True , snake_case: str=None , **snake_case: Optional[Any] , ) -> Tuple: snake_case_ :str = vocab_size snake_case_ :int = max_position_embeddings snake_case_ :List[str] = hidden_size snake_case_ :int = intermediate_size snake_case_ :Optional[Any] = num_hidden_layers snake_case_ :Optional[Any] = num_attention_heads snake_case_ :str = hidden_act snake_case_ :str = initializer_range snake_case_ :List[str] = rms_norm_eps snake_case_ :str = use_cache snake_case_ :str = kwargs.pop( """use_memorry_efficient_attention""" , __lowerCAmelCase ) snake_case_ :List[str] = hidden_dropout_prob snake_case_ :Dict = attention_dropout_prob snake_case_ :List[Any] = use_stable_embedding snake_case_ :str = shared_input_output_embedding snake_case_ :Optional[int] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase , ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowerCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"""got {self.rope_scaling}""" ) snake_case_ :Union[str, Any] = self.rope_scaling.get("""type""" , __lowerCAmelCase ) snake_case_ :int = self.rope_scaling.get("""factor""" , __lowerCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}""" )
66
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
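`prepare_pegasus_inputs_dict` above derives attention masks from the pad token; the core step is easy to check in isolation:

import numpy as np

# Positions equal to pad_token_id get mask 0, everything else 1.
pad_token_id = 0
input_ids = np.array([[5, 7, 0, 0]])
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
print(attention_mask)  # [[1 1 0 0]]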
289
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowercase ( metaclass=lowerCAmelCase_ ): """simple docstring""" _a = ['torch', 'torchsde'] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def lowerCAmelCase__ ( cls , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def lowerCAmelCase__ ( cls , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] )
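The class above is the library's "dummy object" pattern: a placeholder exported when optional backends (`torch`, `torchsde`) are missing, so any use fails with a helpful ImportError instead of an opaque NameError. A self-contained sketch of the mechanism with illustrative names; the real `DummyObject` and `requires_backends` live in the package's utils:

def requires_backends(obj, backends):
    raise ImportError(f"{obj} requires the missing backends: {', '.join(backends)}")


class DummyObject(type):
    # Any attribute access on the *class* fails with the same message.
    def __getattr__(cls, name):
        requires_backends(cls, cls._backends)


class SomeSdeScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


# SomeSdeScheduler()            # ImportError: ... torch, torchsde
# SomeSdeScheduler.from_config  # ImportError: ... torch, torchsde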
97
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
0
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class A_ ( lowerCAmelCase_ ): _lowercase : Any = 'efficientnet' def __init__( self : Any , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 6_0_0 , UpperCAmelCase : float = 2.0 , UpperCAmelCase : float = 3.1 , UpperCAmelCase : int = 8 , UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , UpperCAmelCase : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , UpperCAmelCase : List[int] = [] , UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase : float = 0.25 , UpperCAmelCase : str = "swish" , UpperCAmelCase : int = 2_5_6_0 , UpperCAmelCase : str = "mean" , UpperCAmelCase : float = 0.02 , UpperCAmelCase : float = 0.001 , UpperCAmelCase : float = 0.99 , UpperCAmelCase : float = 0.5 , UpperCAmelCase : float = 0.2 , **UpperCAmelCase : List[Any] , ) -> Optional[Any]: super().__init__(**__lowerCAmelCase ) __lowerCAmelCase: Union[str, Any] = num_channels __lowerCAmelCase: Optional[Any] = image_size __lowerCAmelCase: Any = width_coefficient __lowerCAmelCase: Tuple = depth_coefficient __lowerCAmelCase: List[str] = depth_divisor __lowerCAmelCase: Any = kernel_sizes __lowerCAmelCase: int = in_channels __lowerCAmelCase: Dict = out_channels __lowerCAmelCase: Tuple = depthwise_padding __lowerCAmelCase: Union[str, Any] = strides __lowerCAmelCase: Any = num_block_repeats __lowerCAmelCase: Dict = expand_ratios __lowerCAmelCase: Tuple = squeeze_expansion_ratio __lowerCAmelCase: List[Any] = hidden_act __lowerCAmelCase: Tuple = hidden_dim __lowerCAmelCase: str = pooling_type __lowerCAmelCase: List[Any] = initializer_range __lowerCAmelCase: Tuple = batch_norm_eps __lowerCAmelCase: Optional[Any] = batch_norm_momentum __lowerCAmelCase: Optional[int] = dropout_rate __lowerCAmelCase: Optional[Any] = drop_connect_rate __lowerCAmelCase: str = sum(__lowerCAmelCase ) * 4 class A_ ( lowerCAmelCase_ ): _lowercase : Dict = version.parse('1.11' ) @property def UpperCAmelCase ( self : Any ) -> List[Any]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCAmelCase ( self : int ) -> Optional[int]: return 1E-5
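One detail worth noting above: the constructor derives `num_hidden_layers` from the block repeats rather than taking it as an argument. A hedged check against the released `EfficientNetConfig` (available in transformers since v4.27):

from transformers import EfficientNetConfig

config = EfficientNetConfig()  # b7-style defaults as above
assert config.num_hidden_layers == sum(config.num_block_repeats) * 4  # 16 * 4 = 64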
322
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
0
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected unweighted graph for running the Markov chain algorithm."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node; falls through to "" if probabilities sum below 1."""
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> Counter[str]:
    """Run the chain for `steps` transitions and count visits per node."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
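A quick illustrative run of the chain above; outgoing probabilities of each node must sum to 1, otherwise `transition` can fall through and return the empty string:

# A two-state chain that mostly stays in "a": expected visit ratio is about 5:1.
transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
print(get_transitions("a", transitions, 1_000))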
196
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
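The benchmark above hinges on one pattern: wrapping the measured call either in eager execution or in a (possibly XLA-compiled) tf.function. A stripped-down sketch of that pattern, assuming TF 2.5+ where `jit_compile` is the modern spelling of the `experimental_compile` flag used above; the names here are illustrative, not the benchmark's API.

import tensorflow as tf


def run_with_tf_optimizations_demo(do_eager_mode: bool, use_xla: bool):
    # Pick eager execution or a traced (optionally XLA-compiled) graph
    # function at decoration time, mirroring the decorator above.
    def wrapper(func):
        if do_eager_mode:
            return func  # plain eager call
        return tf.function(func, jit_compile=use_xla)  # traced graph, optional XLA
    return wrapper


@run_with_tf_optimizations_demo(do_eager_mode=False, use_xla=True)
def matmul(x):
    return tf.matmul(x, x)


print(matmul(tf.eye(4)))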
289
0
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the configuration from the json file.
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
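A hypothetical invocation of the converter above; every path is a placeholder. Note the tokenizer save path relies on the checkpoint filename ending in "model.ckpt" (ten characters), which `tf_checkpoint_path[:-10]` strips before appending "vocab.txt".

# Placeholder paths for illustration only.
convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="/path/to/model.ckpt",       # assumed local TF checkpoint
    tapas_config_file="/path/to/tapas_config.json",  # assumed config json
    pytorch_dump_path="/path/to/pytorch_dump",
)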
85
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
0
"""simple docstring""" import math SCREAMING_SNAKE_CASE__ = 10 SCREAMING_SNAKE_CASE__ = 7 SCREAMING_SNAKE_CASE__ = BALLS_PER_COLOUR * NUM_COLOURS def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] = 20 ): '''simple docstring''' lowerCAmelCase = math.comb(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowerCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , SCREAMING_SNAKE_CASE ) lowerCAmelCase = NUM_COLOURS * (1 - missing_colour / total) return F'{result:.9f}' if __name__ == "__main__": print(solution(20))
46
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
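The conversion's core move is permuting TensorFlow's channels-last conv kernels into PyTorch's channels-first layout. A tiny self-contained check of the permutes used above (shapes are arbitrary):

import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32)  # TF layout: (H, W, C_in, C_out)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> (C_out, C_in, H, W)
assert pt_weight.shape == (32, 16, 3, 3)
# Depthwise kernels are laid out as (H, W, C, multiplier) in TF, hence the
# separate permute(2, 3, 0, 1) applied to "depthwise_kernel" entries above.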
289
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
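A smoke test for the processor, assuming the class above (or its transformers equivalent) is importable; the class name follows the reconstruction, and the random array stands in for a real image.

import numpy as np

processor = EfficientFormerImageProcessor()
fake_image = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)  # HWC uint8
batch = processor(images=fake_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop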
230
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
0
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def UpperCamelCase ( ) -> Any: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_lowerCAmelCase ): requests.request("""GET""", """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""", """https://huggingface.co""", timeout=1.0 ) @pytest.mark.integration def UpperCamelCase ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""", """https://huggingface.co""" ) def UpperCamelCase ( ) -> int: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_lowerCAmelCase ): http_head("""https://huggingface.co""" )
246
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
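A hedged miniature of what these tests exercise, assuming a recent transformers install: MobileViT's processor resizes so the shortest edge matches `size`, center-crops to `crop_size`, and flips channel order instead of normalising. Sizes follow the tester defaults above.

import numpy as np
from transformers import MobileViTImageProcessor

proc = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
img = (np.random.rand(3, 30, 400) * 255).astype(np.uint8)  # channels-first input
out = proc(img, return_tensors="np")["pixel_values"]
print(out.shape)  # (1, 3, 18, 18)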
289
0
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
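A standalone check of make_linear_from_emb from the block above: the returned projection shares the embedding matrix's storage, which is how the LM head gets tied to the decoder embeddings for fine-tuned checkpoints.

import torch
from torch import nn

emb = nn.Embedding(100, 16)
lm_head = make_linear_from_emb(emb)
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # same underlying storage
assert lm_head.bias is None
# The (100, 16) weight acts as a 16-dim -> 100-logit projection under F.linear.
print(lm_head(torch.randn(2, 16)).shape)  # torch.Size([2, 100])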
164
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
289
0
"""simple docstring""" import pytest lowerCAmelCase__ : Tuple = '__dummy_dataset1__' lowerCAmelCase__ : str = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def a_ ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def a_ ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = dataset_loading_script_name UpperCAmelCase__ = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=lowerCamelCase ) UpperCAmelCase__ = script_dir / f'''{script_name}.py''' with open(lowerCamelCase , 'w' ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
98
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
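A hedged miniature of what this suite checks: instantiate a tiny, randomly initialised ALBERT from a local config (no download) and verify the forward shape. All sizes here are arbitrary test values.

import torch
from transformers import AlbertConfig, AlbertModel

tiny = AlbertConfig(
    vocab_size=99, embedding_size=16, hidden_size=36,
    num_hidden_layers=2, num_attention_heads=6, intermediate_size=37,
)
model = AlbertModel(tiny).eval()
input_ids = torch.randint(0, 99, (2, 7))
with torch.no_grad():
    out = model(input_ids)
print(out.last_hidden_state.shape)  # torch.Size([2, 7, 36])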
289
0
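The Albert test block in the row above repeats one pattern throughout: instantiate a head, run a forward pass, and assert the logits shape. A minimal self-contained sketch of that shape check, with descriptive names in place of the obfuscated ones (the linear head is a stand-in for illustration, not the real AlbertForTokenClassification):

import torch
import torch.nn as nn

# Dimensions mirror the tester fields (batch_size, seq_length, hidden_size, num_labels).
batch_size, seq_length, hidden_size, num_labels = 13, 7, 36, 3

# Stand-in for a token-classification head: hidden states -> per-token label logits.
head = nn.Linear(hidden_size, num_labels)

hidden_states = torch.randn(batch_size, seq_length, hidden_size)
logits = head(hidden_states)

# The assertion the tester's create_and_check_* methods boil down to.
assert logits.shape == (batch_size, seq_length, num_labels)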
'''simple docstring''' import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a ( ): '''simple docstring''' A_ : int = argparse.ArgumentParser() parser.add_argument( """-m""" , """--pretrained_model_name_or_path""" , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , ) parser.add_argument( """-c""" , """--caption""" , type=lowerCamelCase__ , default="""robotic cat with wings""" , help="""Text used to generate images.""" , ) parser.add_argument( """-n""" , """--images_num""" , type=lowerCamelCase__ , default=4 , help="""How much images to generate.""" , ) parser.add_argument( """-s""" , """--seed""" , type=lowerCamelCase__ , default=42 , help="""Seed for random process.""" , ) parser.add_argument( """-ci""" , """--cuda_id""" , type=lowerCamelCase__ , default=0 , help="""cuda_id.""" , ) A_ : str = parser.parse_args() return args def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not len(lowerCamelCase__ ) == rows * cols: raise ValueError("""The specified number of rows and columns are not correct.""" ) A_, A_ : Tuple = imgs[0].size A_ : Union[str, Any] = Image.new("""RGB""" , size=(cols * w, rows * h) ) A_, A_ : List[str] = grid.size for i, img in enumerate(lowerCamelCase__ ): grid.paste(lowerCamelCase__ , box=(i % cols * w, i // cols * h) ) return grid def a ( lowerCamelCase__ , lowerCamelCase__="robotic cat with wings" , lowerCamelCase__=7.5 , lowerCamelCase__=50 , lowerCamelCase__=1 , lowerCamelCase__=42 , ): '''simple docstring''' A_ : str = torch.Generator(pipeline.device ).manual_seed(lowerCamelCase__ ) A_ : int = pipeline( lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , ).images A_ : Optional[int] = int(math.sqrt(lowerCamelCase__ ) ) A_ : Any = image_grid(lowerCamelCase__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images lowerCamelCase :List[str] = parse_args() # Load models and create wrapper for stable diffusion lowerCamelCase :Optional[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') lowerCamelCase :List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') lowerCamelCase :Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') lowerCamelCase :Optional[int] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') lowerCamelCase :Dict = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) lowerCamelCase :List[Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): lowerCamelCase :List[str] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: lowerCamelCase :Optional[int] = unet.to(torch.device('''cuda''', args.cuda_id)) lowerCamelCase :int = pipeline.to(unet.device) lowerCamelCase , lowerCamelCase :Any = generate_images(pipeline, prompt=args.caption, 
num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) lowerCamelCase :Tuple = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
206
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
0
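The max-flow routine in the style context above is BFS-based Ford-Fulkerson (Edmonds-Karp); its final function returns the forward edges left saturated in the residual graph, i.e. a minimum cut. A small hand-checkable sketch, assuming the mincut function defined above is in scope (the 3-node capacity matrix is made up for illustration):

# Capacities: 0 -> 1 carries 5, 1 -> 2 carries 3; the single augmenting path
# pushes 3 units and saturates edge (1, 2), so that edge is the minimum cut.
tiny_graph = [
    [0, 5, 0],
    [0, 0, 3],
    [0, 0, 0],
]
print(mincut(tiny_graph, source=0, sink=2))  # expected: [(1, 2)]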
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __a = NewType("DataClass", Any) __a = NewType("DataClassType", Any) def A_ ( _lowercase ): '''simple docstring''' if isinstance(_lowercase, _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Optional[Any] = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase, _lowercase ) def A_ ( *, _lowercase = None, _lowercase = None, _lowercase = dataclasses.MISSING, _lowercase = dataclasses.MISSING, _lowercase = None, **_lowercase, ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls snake_case_ :List[str] = {} if aliases is not None: snake_case_ :Union[str, Any] = aliases if help is not None: snake_case_ :Dict = help return dataclasses.field(metadata=_lowercase, default=_lowercase, default_factory=_lowercase, **_lowercase ) class lowerCamelCase ( lowerCAmelCase_ ): '''simple docstring''' _A : Iterable[DataClassType] def __init__( self: int , snake_case: Union[DataClassType, Iterable[DataClassType]] , **snake_case: str ) -> Any: # To make the default appear when using --help if "formatter_class" not in kwargs: snake_case_ :List[Any] = ArgumentDefaultsHelpFormatter super().__init__(**__lowerCAmelCase ) if dataclasses.is_dataclass(__lowerCAmelCase ): snake_case_ :List[Any] = [dataclass_types] snake_case_ :int = list(__lowerCAmelCase ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__lowerCAmelCase ) @staticmethod def lowerCAmelCase_ ( snake_case: ArgumentParser , snake_case: dataclasses.Field ) -> Dict: snake_case_ :Dict = f"""--{field.name}""" snake_case_ :Dict = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __lowerCAmelCase ): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""" ) snake_case_ :Union[str, Any] = kwargs.pop("""aliases""" , [] ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): snake_case_ :Union[str, Any] = [aliases] snake_case_ :str = getattr(field.type , """__origin__""" , field.type ) if origin_type is Union or (hasattr(__lowerCAmelCase , """UnionType""" ) and isinstance(__lowerCAmelCase , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__lowerCAmelCase ) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" f""" Problem encountered in field \'{field.name}\'.""" ) if type(__lowerCAmelCase ) not in field.type.__args__: # filter `str` in Union snake_case_ :int = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] snake_case_ :Optional[Any] = getattr(field.type , """__origin__""" , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) snake_case_ :Any = ( field.type.__args__[0] if isinstance(__lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1] ) snake_case_ :Union[str, Any] = getattr(field.type , """__origin__""" , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) snake_case_ :Optional[Any] = {} if origin_type is Literal or (isinstance(field.type , __lowerCAmelCase ) and issubclass(field.type , __lowerCAmelCase )): if origin_type is Literal: snake_case_ :Any = field.type.__args__ else: snake_case_ :Tuple = [x.value for x in field.type] snake_case_ :Optional[Any] = make_choice_type_function(kwargs["""choices"""] ) if field.default is not dataclasses.MISSING: snake_case_ :Tuple = field.default else: snake_case_ :str = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument snake_case_ :Dict = copy(__lowerCAmelCase ) # Hack because type=bool in argparse does not behave as we want. snake_case_ :List[Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
snake_case_ :Union[str, Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way snake_case_ :Optional[Any] = default # This tells argparse we accept 0 or 1 value after --field_name snake_case_ :Optional[int] = """?""" # This is the value that will get picked if we do --field_name (without value) snake_case_ :int = True elif isclass(__lowerCAmelCase ) and issubclass(__lowerCAmelCase , __lowerCAmelCase ): snake_case_ :Tuple = field.type.__args__[0] snake_case_ :Any = """+""" if field.default_factory is not dataclasses.MISSING: snake_case_ :Optional[Any] = field.default_factory() elif field.default is dataclasses.MISSING: snake_case_ :Optional[Any] = True else: snake_case_ :int = field.type if field.default is not dataclasses.MISSING: snake_case_ :str = field.default elif field.default_factory is not dataclasses.MISSING: snake_case_ :str = field.default_factory() else: snake_case_ :Optional[int] = True parser.add_argument(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): snake_case_ :List[str] = False parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **__lowerCAmelCase ) def lowerCAmelCase_ ( self: Any , snake_case: DataClassType ) -> List[Any]: if hasattr(__lowerCAmelCase , """_argument_group_name""" ): snake_case_ :Optional[int] = self.add_argument_group(dtype._argument_group_name ) else: snake_case_ :int = self try: snake_case_ :Tuple = get_type_hints(__lowerCAmelCase ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowerCAmelCase ): snake_case_ :List[str] = """.""".join(map(__lowerCAmelCase , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""" ) from ex raise for field in dataclasses.fields(__lowerCAmelCase ): if not field.init: continue snake_case_ :str = type_hints[field.name] self._parse_dataclass_field(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self: Any , snake_case: List[str]=None , snake_case: Any=False , snake_case: Dict=True , snake_case: str=None , snake_case: Optional[int]=None , ) -> List[Any]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): snake_case_ :Optional[int] = [] if args_filename: args_files.append(Path(__lowerCAmelCase ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values snake_case_ :List[Any] = ArgumentParser() args_file_parser.add_argument(__lowerCAmelCase , type=__lowerCAmelCase , action="""append""" ) # Use only remaining args for further parsing (remove the args_file_flag) snake_case_, snake_case_ :List[str] = args_file_parser.parse_known_args(args=__lowerCAmelCase ) snake_case_ :Optional[Any] = vars(__lowerCAmelCase ).get(args_file_flag.lstrip("""-""" ) , __lowerCAmelCase ) if cmd_args_file_paths: args_files.extend([Path(__lowerCAmelCase ) for p in cmd_args_file_paths] ) snake_case_ :Any = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last snake_case_ :Union[str, Any] = file_args + args if args is not None else file_args + sys.argv[1:] snake_case_, snake_case_ :Tuple = self.parse_known_args(args=__lowerCAmelCase ) snake_case_ :Optional[Any] = [] for dtype in self.dataclass_types: snake_case_ :str = {f.name for f in dataclasses.fields(__lowerCAmelCase ) if f.init} snake_case_ :Optional[int] = {k: v for k, v in vars(__lowerCAmelCase ).items() if k in keys} for k in keys: delattr(__lowerCAmelCase , __lowerCAmelCase ) snake_case_ :List[str] = dtype(**__lowerCAmelCase ) outputs.append(__lowerCAmelCase ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__lowerCAmelCase ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def lowerCAmelCase_ ( self: str , snake_case: Dict[str, Any] , snake_case: bool = False ) -> Optional[Any]: snake_case_ :Optional[int] = set(args.keys() ) snake_case_ :Union[str, Any] = [] for dtype in self.dataclass_types: snake_case_ :Any = {f.name for f in dataclasses.fields(__lowerCAmelCase ) if f.init} snake_case_ :List[str] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) snake_case_ :Optional[int] = dtype(**__lowerCAmelCase ) outputs.append(__lowerCAmelCase ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__lowerCAmelCase )}""" ) return tuple(__lowerCAmelCase ) def lowerCAmelCase_ ( self: Any , snake_case: str , snake_case: bool = False ) -> Dict: with open(Path(__lowerCAmelCase ) , encoding="""utf-8""" ) as open_json_file: snake_case_ :Any = json.loads(open_json_file.read() ) snake_case_ :Dict = self.parse_dict(__lowerCAmelCase , allow_extra_keys=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def lowerCAmelCase_ ( self: int , snake_case: str , snake_case: bool = False ) -> Optional[int]: snake_case_ :Optional[Any] = self.parse_dict(yaml.safe_load(Path(__lowerCAmelCase ).read_text() ) , allow_extra_keys=__lowerCAmelCase ) return tuple(__lowerCAmelCase )
66
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
0
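The argument-parser class in the row above mirrors transformers.HfArgumentParser. A hedged usage sketch against the public API it implements (the TrainingOptions dataclass and its fields are invented for illustration):

from dataclasses import dataclass, field

from transformers import HfArgumentParser

@dataclass
class TrainingOptions:
    learning_rate: float = field(default=5e-5, metadata={"help": "Optimizer step size."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run evaluation."})

parser = HfArgumentParser(TrainingOptions)
# parse_args_into_dataclasses returns one instance per dataclass given to the parser;
# a bare --do_eval flag flips the boolean field to True via the nargs="?" handling above.
(options,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-4", "--do_eval"])
assert options.learning_rate == 3e-4 and options.do_eval is True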
'''simple docstring''' def a ( __a ) -> Any: '''simple docstring''' return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def a ( __a ) -> Tuple: '''simple docstring''' UpperCamelCase__ :Tuple = 0 UpperCamelCase__ :Union[str, Any] = len(__a ) # No of vertices in graph UpperCamelCase__ :Dict = [0] * n UpperCamelCase__ :int = [False] * n def dfs(__a , __a , __a , __a ): UpperCamelCase__ :Tuple = True UpperCamelCase__ :List[str] = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(__a , __a , __a , id_ ) UpperCamelCase__ :List[str] = min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge UpperCamelCase__ :Any = min(low[at] , low[to] ) UpperCamelCase__ :Union[str, Any] = [] for i in range(__a ): if not visited[i]: dfs(__a , -1 , __a , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
97
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
0
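The bridge finder in the row above is Tarjan's low-link DFS; because the obfuscation collapsed its names, here is a hedged restatement with descriptive identifiers (same algorithm, adjacency-dict input assumed):

def find_bridges(graph):
    """Return the bridge edges of an undirected graph given as {node: [neighbors]}."""
    n = len(graph)
    visited = [False] * n
    ids = [0] * n   # DFS discovery order
    low = [0] * n   # lowest discovery id reachable without reusing the tree edge
    bridges, counter = [], [0]

    def dfs(at, parent):
        visited[at] = True
        ids[at] = low[at] = counter[0]
        counter[0] += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                # No back edge from the subtree under `to` climbs above `at`,
                # so the tree edge (at, to) is a bridge.
                if ids[at] < low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                low[at] = min(low[at], ids[to])

    for i in range(n):
        if not visited[i]:
            dfs(i, -1)
    return bridges

# Two triangles joined by a single edge: only (2, 3) is a bridge.
barbell = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2, 4, 5], 4: [3, 5], 5: [3, 4]}
assert find_bridges(barbell) == [(2, 3)]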
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class A_ : def __init__( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : str=True , UpperCAmelCase : str=True , UpperCAmelCase : Optional[int]=9_9 , UpperCAmelCase : List[str]=3_2 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : str=3_7 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[Any]=5_1_2 , UpperCAmelCase : Optional[Any]=1_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : str=3 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=None , ) -> Optional[int]: __lowerCAmelCase: Tuple = parent __lowerCAmelCase: List[Any] = 1_3 __lowerCAmelCase: List[str] = 7 __lowerCAmelCase: List[str] = True __lowerCAmelCase: str = True __lowerCAmelCase: int = True __lowerCAmelCase: str = True __lowerCAmelCase: str = 9_9 __lowerCAmelCase: int = 3_2 __lowerCAmelCase: Dict = 2 __lowerCAmelCase: str = 4 __lowerCAmelCase: Tuple = 3_7 __lowerCAmelCase: Dict = 'gelu' __lowerCAmelCase: Any = 0.1 __lowerCAmelCase: Optional[Any] = 0.1 __lowerCAmelCase: Any = 5_1_2 __lowerCAmelCase: Union[str, Any] = 1_6 __lowerCAmelCase: List[Any] = 2 __lowerCAmelCase: List[str] = 0.02 __lowerCAmelCase: str = 3 __lowerCAmelCase: Tuple = 4 __lowerCAmelCase: int = None def UpperCAmelCase ( self : int ) -> str: __lowerCAmelCase: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase: List[Any] = None if self.use_input_mask: __lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase: Any = None if self.use_token_type_ids: __lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase: Union[str, Any] = None __lowerCAmelCase: List[Any] = None __lowerCAmelCase: List[Any] = None if self.use_labels: __lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase: str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase: List[str] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase: Tuple = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ) -> str: __lowerCAmelCase: Union[str, Any] = TFRoFormerModel(config=__lowerCAmelCase ) __lowerCAmelCase: Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowerCAmelCase: Tuple = [input_ids, input_mask] __lowerCAmelCase: Optional[Any] = model(__lowerCAmelCase ) __lowerCAmelCase: Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Tuple: __lowerCAmelCase: Tuple = True __lowerCAmelCase: Optional[Any] = TFRoFormerForCausalLM(config=__lowerCAmelCase ) __lowerCAmelCase: List[str] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase: Dict = model(__lowerCAmelCase )['logits'] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict ) -> Tuple: __lowerCAmelCase: Dict = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) __lowerCAmelCase: Any = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase: List[Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any ) -> Tuple: __lowerCAmelCase: Dict = self.num_labels __lowerCAmelCase: List[str] = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) __lowerCAmelCase: Any = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase: Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ) -> List[Any]: __lowerCAmelCase: str = self.num_choices __lowerCAmelCase: Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) __lowerCAmelCase: List[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase: Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase: Tuple = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase: Tuple = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': 
multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowerCAmelCase: Any = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] ) -> Union[str, Any]: __lowerCAmelCase: List[Any] = self.num_labels __lowerCAmelCase: int = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) __lowerCAmelCase: Optional[int] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase: Dict = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> List[str]: __lowerCAmelCase: int = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) __lowerCAmelCase: str = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase: Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self : List[str] ) -> List[Any]: __lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): str = config_and_inputs __lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowercase : Optional[Any] = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) _lowercase : int = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) _lowercase : Tuple = False _lowercase : Optional[Any] = False def UpperCAmelCase ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ) -> Optional[int]: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCAmelCase ( self : int ) -> str: __lowerCAmelCase: Any = TFRoFormerModelTester(self ) __lowerCAmelCase: Tuple = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def UpperCAmelCase ( self : Tuple ) -> Optional[int]: self.config_tester.run_common_tests() def UpperCAmelCase ( self : Optional[Any] ) -> str: __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def UpperCAmelCase ( 
self : str ) -> Optional[int]: __lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def UpperCAmelCase ( self : List[Any] ) -> Any: __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def UpperCAmelCase ( self : Dict ) -> Any: __lowerCAmelCase: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def UpperCAmelCase ( self : List[str] ) -> Dict: __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> Tuple: __lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def UpperCAmelCase ( self : Optional[int] ) -> List[str]: __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def UpperCAmelCase ( self : Any ) -> List[Any]: __lowerCAmelCase: Dict = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class A_ ( unittest.TestCase ): @slow def UpperCAmelCase ( self : Dict ) -> Optional[Any]: __lowerCAmelCase: Optional[Any] = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) __lowerCAmelCase: Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowerCAmelCase: Optional[int] = model(__lowerCAmelCase )[0] # TODO Replace vocab size __lowerCAmelCase: int = 5_0_0_0_0 __lowerCAmelCase: str = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
__lowerCAmelCase: Union[str, Any] = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) @require_tf class A_ ( unittest.TestCase ): _lowercase : str = 1E-4 def UpperCAmelCase ( self : int ) -> Optional[int]: __lowerCAmelCase: Optional[int] = tf.constant([[4, 1_0]] ) __lowerCAmelCase: List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __lowerCAmelCase: Optional[Any] = emba(input_ids.shape ) __lowerCAmelCase: Any = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: __lowerCAmelCase: int = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __lowerCAmelCase: Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) __lowerCAmelCase: Dict = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class A_ ( unittest.TestCase ): _lowercase : int = 1E-4 def UpperCAmelCase ( self : Any ) -> Optional[int]: # 2,12,16,64 __lowerCAmelCase: List[Any] = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __lowerCAmelCase: int = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 __lowerCAmelCase: int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) __lowerCAmelCase: Tuple = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] __lowerCAmelCase , __lowerCAmelCase: str = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase: str = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __lowerCAmelCase: Any = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
322
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
0
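The integration tests in the row above exercise RoFormer's rotary position embeddings. A minimal sketch of the rotation they apply, assuming the RoFormer layout in which the first half of the sinusoidal table is sin and the second half cos (function and variable names here are mine, not the library's):

import tensorflow as tf

def apply_rotary(x, sinusoidal_pos):
    # x: [..., seq_len, dim]; sinusoidal_pos: [seq_len, dim] with sin | cos halves.
    sin, cos = tf.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = tf.repeat(sin, 2, axis=-1)  # [s0, s0, s1, s1, ...]
    cos_pos = tf.repeat(cos, 2, axis=-1)  # [c0, c0, c1, c1, ...]
    # rotate_half: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    x_even, x_odd = x[..., ::2], x[..., 1::2]
    rotated = tf.reshape(tf.stack([-x_odd, x_even], axis=-1), tf.shape(x))
    return x * cos_pos + rotated * sin_pos

# e.g. a [1, 4, 8] query rotated by a [4, 8] positional table keeps its shape
q = tf.random.normal([1, 4, 8])
pos = tf.random.normal([4, 8])
assert apply_rotary(q, pos).shape == q.shape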
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def snake_case_ ( *snake_case ) -> Optional[int]: if not isinstance(snake_case , snake_case ): lowercase__: Union[str, Any] = list(snake_case ) for i in range(len(snake_case ) ): lowercase__: Union[str, Any] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def snake_case_ ( snake_case ) -> List[Any]: lowercase__: Optional[Any] = [ 'CUDA out of memory.', # CUDA OOM 'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU 'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM ] if isinstance(snake_case , snake_case ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def snake_case_ ( snake_case = None , snake_case = 1_28 ) -> Optional[int]: if function is None: return functools.partial(snake_case , starting_batch_size=snake_case ) lowercase__: Dict = starting_batch_size def decorator(*snake_case , **snake_case ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() lowercase__: List[Any] = list(inspect.signature(snake_case ).parameters.keys() ) # Guard against user error if len(snake_case ) < (len(snake_case ) + 1): lowercase__: List[str] = ', '.join([f'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'Batch size was passed into `{function.__name__}` as the first argument when called.' f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' ) while True: if batch_size == 0: raise RuntimeError('No executable batch size found, reached zero.' ) try: return function(snake_case , *snake_case , **snake_case ) except Exception as e: if should_reduce_batch_size(snake_case ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
196
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
289
0
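Both halves of the row above revolve around accelerate's find_executable_batch_size: a decorator that retries its function with a halved batch size whenever it catches an out-of-memory error. A hedged usage sketch against the public API (the training body is a placeholder):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # The decorator owns `batch_size`: on CUDA/cuDNN/CPU OOM it clears caches,
    # halves the value, and calls this function again.
    print(f"attempting batch_size={batch_size}")
    # ... build dataloaders and run training with `batch_size` here ...

training_loop()  # called with no arguments; raises once batch_size reaches zero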
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCamelCase_( snake_case : int ): '''simple docstring''' snake_case_ = tmp_path / "file.csv" snake_case_ = textwrap.dedent( "\\n header1,header2\n 1,2\n 10,20\n " ) with open(snake_case , "w" ) as f: f.write(snake_case ) return str(snake_case ) @pytest.fixture def UpperCamelCase_( snake_case : Dict ): '''simple docstring''' snake_case_ = tmp_path / "malformed_file.csv" snake_case_ = textwrap.dedent( "\\n header1,header2\n 1,2\n 10,20,\n " ) with open(snake_case , "w" ) as f: f.write(snake_case ) return str(snake_case ) @pytest.fixture def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Tuple ): '''simple docstring''' snake_case_ = tmp_path / "csv_with_image.csv" snake_case_ = textwrap.dedent( f'\\n image\n {image_file}\n ' ) with open(snake_case , "w" ) as f: f.write(snake_case ) return str(snake_case ) @pytest.fixture def UpperCamelCase_( snake_case : int ): '''simple docstring''' snake_case_ = tmp_path / "csv_with_label.csv" snake_case_ = textwrap.dedent( "\\n label\n good\n bad\n good\n " ) with open(snake_case , "w" ) as f: f.write(snake_case ) return str(snake_case ) @pytest.fixture def UpperCamelCase_( snake_case : Dict ): '''simple docstring''' snake_case_ = tmp_path / "csv_with_int_list.csv" snake_case_ = textwrap.dedent( "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " ) with open(snake_case , "w" ) as f: f.write(snake_case ) return str(snake_case ) def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Any , snake_case : Optional[Any] ): '''simple docstring''' snake_case_ = Csv() snake_case_ = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(snake_case , match="Error tokenizing data" ): for _ in generator: pass assert any( record.levelname == "ERROR" and "Failed to read file" in record.message and os.path.basename(snake_case ) in record.message for record in caplog.records ) @require_pil def UpperCamelCase_( snake_case : int ): '''simple docstring''' with open(snake_case , encoding="utf-8" ) as f: snake_case_ = f.read().splitlines()[1] snake_case_ = Csv(encoding="utf-8" , features=Features({"image": Image()} ) ) snake_case_ = csv._generate_tables([[csv_file_with_image]] ) snake_case_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("image" ).type == Image()() snake_case_ = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCamelCase_( snake_case : str ): '''simple docstring''' with open(snake_case , encoding="utf-8" ) as f: snake_case_ = f.read().splitlines()[1:] snake_case_ = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) ) snake_case_ = csv._generate_tables([[csv_file_with_label]] ) snake_case_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )() snake_case_ = pa_table.to_pydict()["label"] assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(snake_case ) for label in labels] def UpperCamelCase_( snake_case : Any ): '''simple docstring''' snake_case_ = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda snake_case : [int(snake_case ) for i in x.split()]} ) snake_case_ = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ = pa.concat_tables([table for _, 
table in generator] ) assert pa.types.is_list(pa_table.schema.field("int_list" ).type ) snake_case_ = pa_table.to_pydict()["int_list"] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
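The tests above drive the `datasets` Csv builder directly. A minimal, self-contained sketch of the `converters` hook they exercise; this assumes the `datasets` library is installed, "ints.csv" is a placeholder path, and `_generate_tables` is the same private method the tests call:

```python
# A minimal sketch, assuming the `datasets` library is installed.
import textwrap

from datasets.packaged_modules.csv.csv import Csv

with open("ints.csv", "w") as f:
    f.write(textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        """
    ))

# `converters` is forwarded to pandas.read_csv, so each raw cell is parsed
# into a Python value before the table is converted to Arrow.
builder = Csv(converters={"int_list": lambda cell: [int(i) for i in cell.split()]})
for _, table in builder._generate_tables([["ints.csv"]]):
    print(table.to_pydict()["int_list"])  # [[1, 2, 3], [4, 5, 6]]
```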
85
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
0
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py SCREAMING_SNAKE_CASE__ = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) SCREAMING_SNAKE_CASE__ = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' lowerCAmelCase = SavedModel() lowerCAmelCase = [] with open(os.path.join(SCREAMING_SNAKE_CASE , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: lowerCAmelCase = json.load(SCREAMING_SNAKE_CASE )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] ) with open(SCREAMING_SNAKE_CASE , """rb""" ) as f: saved_model.ParseFromString(f.read() ) lowerCAmelCase = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want lowerCAmelCase = sorted(SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(SCREAMING_SNAKE_CASE ) if strict and len(SCREAMING_SNAKE_CASE ) > 0: raise Exception(F'Found the following incompatible ops for the opset {opset}:\n' + incompatible_ops ) elif len(SCREAMING_SNAKE_CASE ) > 0: print(F'Found the following incompatible ops for the opset {opset}:' ) print(*SCREAMING_SNAKE_CASE , sep="""\n""" ) else: print(F'The saved model {saved_model_path} can properly be converted with ONNX.' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) SCREAMING_SNAKE_CASE__ = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
46
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
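The class names above are mangled; in `diffusers` the model under test is `UNet2DModel`. A minimal inference sketch mirroring what the tests do, assuming `diffusers` and `torch` are installed and using the checkpoint id the tests load:

```python
import torch
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()

noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
timestep = torch.tensor([10])
with torch.no_grad():
    sample = model(noise, timestep).sample  # same .sample access the tests use
print(sample.shape)
```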
289
0
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin A__ = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class a ( unittest.TestCase , lowerCAmelCase_ ): def __lowerCamelCase ( self :int ): snake_case__ : Optional[Any] = load_tool('''text-question-answering''' ) self.tool.setup() snake_case__ : Any = load_tool('''text-question-answering''' ,remote=__lowerCAmelCase ) def __lowerCamelCase ( self :Tuple ): snake_case__ : str = self.tool(__lowerCAmelCase ,'''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCAmelCase ,'''launched the BigScience Research Workshop''' ) def __lowerCamelCase ( self :Dict ): snake_case__ : Tuple = self.remote_tool(__lowerCAmelCase ,'''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCAmelCase ,'''launched the BigScience Research Workshop''' ) def __lowerCamelCase ( self :Any ): snake_case__ : Union[str, Any] = self.tool(text=__lowerCAmelCase ,question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCAmelCase ,'''launched the BigScience Research Workshop''' ) def __lowerCamelCase ( self :Union[str, Any] ): snake_case__ : Dict = self.remote_tool(text=__lowerCAmelCase ,question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCAmelCase ,'''launched the BigScience Research Workshop''' )
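A usage sketch of the tool API the test exercises; this assumes a `transformers` release that still ships the agents tools, and the answer text depends on the underlying model:

```python
from transformers import load_tool

context = "Hugging Face launched the BigScience Research Workshop in April 2021."
qa_tool = load_tool("text-question-answering")
qa_tool.setup()
print(qa_tool(text=context, question="What did Hugging Face do in April 2021?"))
```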
230
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
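A sketch of running the pipeline the slow tests exercise outside the test harness; it assumes a CUDA GPU and `diffusers`, and the checkpoint id comes from the tests above:

```python
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()       # same memory savers the tests turn on
pipe.enable_sequential_cpu_offload()

image = pipe("anime turtle", num_inference_steps=25).images[0]
image.save("turtle.png")
```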
289
0
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def UpperCamelCase ( _lowerCAmelCase : List[str], _lowerCAmelCase : Optional[int] ) -> str: return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(_lowerCAmelCase, _lowerCAmelCase ) ) ) def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : Dict ) -> Optional[Any]: if dataset.ndim != value_array.ndim: _UpperCAmelCase : Any = ( """Wrong input data's dimensions... """ f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(_lowerCAmelCase ) try: if dataset.shape[1] != value_array.shape[1]: _UpperCAmelCase : List[Any] = ( """Wrong input data's shape... """ f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(_lowerCAmelCase ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("""Wrong shape""" ) if dataset.dtype != value_array.dtype: _UpperCAmelCase : Any = ( """Input data have different datatype... """ f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(_lowerCAmelCase ) _UpperCAmelCase : List[str] = [] for value in value_array: _UpperCAmelCase : Any = euclidean(_lowerCAmelCase, dataset[0] ) _UpperCAmelCase : Tuple = dataset[0].tolist() for dataset_value in dataset[1:]: _UpperCAmelCase : int = euclidean(_lowerCAmelCase, _lowerCAmelCase ) if dist > temp_dist: _UpperCAmelCase : Optional[Any] = temp_dist _UpperCAmelCase : List[Any] = dataset_value.tolist() answer.append([vector, dist] ) return answer def UpperCamelCase ( _lowerCAmelCase : List[str], _lowerCAmelCase : Optional[int] ) -> Optional[int]: return np.dot(_lowerCAmelCase, _lowerCAmelCase ) / (norm(_lowerCAmelCase ) * norm(_lowerCAmelCase )) if __name__ == "__main__": import doctest doctest.testmod()
246
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
0
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class A ( lowerCAmelCase_ ): def __init__( self ) -> Optional[int]: '''simple docstring''' self.test() def A__ ( self ) -> int: '''simple docstring''' lowercase__ = 0 lowercase__ = False while not completed: if counter == 1: self.reset() lowercase__ = self.advance() if not self.does_advance(__lowerCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) lowercase__ , lowercase__ , lowercase__ = self.update(__lowerCAmelCase ) counter += 1 if counter > 10_000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def A__ ( self ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def A__ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def A__ ( self , lowerCamelCase__ ) -> Dict: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def A__ ( self ) -> str: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def A__ ( self ) -> List[str]: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def A__ ( self , lowerCamelCase__=False ) -> Optional[int]: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class A ( lowerCAmelCase_ ): def __init__( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) lowercase__ = token_ids lowercase__ = len(self.token_ids ) lowercase__ = -1 # the index of the currently fulfilled step lowercase__ = False def A__ ( self ) -> str: '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def A__ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def A__ ( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) lowercase__ = False lowercase__ = False lowercase__ = False if self.does_advance(__lowerCAmelCase ): self.fulfilled_idx += 1 lowercase__ = True if self.fulfilled_idx == (self.seqlen - 1): lowercase__ = True lowercase__ = completed else: # failed to make progress. lowercase__ = True self.reset() return stepped, completed, reset def A__ ( self ) -> Optional[int]: '''simple docstring''' lowercase__ = False lowercase__ = 0 def A__ ( self ) -> int: '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def A__ ( self , lowerCamelCase__=False ) -> Optional[Any]: '''simple docstring''' lowercase__ = PhrasalConstraint(self.token_ids ) if stateful: lowercase__ = self.seqlen lowercase__ = self.fulfilled_idx lowercase__ = self.completed return new_constraint class A : def __init__( self , lowerCamelCase__ , lowerCamelCase__=True ) -> Optional[int]: '''simple docstring''' lowercase__ = max([len(__lowerCAmelCase ) for one in nested_token_ids] ) lowercase__ = {} for token_ids in nested_token_ids: lowercase__ = root for tidx, token_id in enumerate(__lowerCAmelCase ): if token_id not in level: lowercase__ = {} lowercase__ = level[token_id] if no_subsets and self.has_subsets(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" F''' {nested_token_ids}.''' ) lowercase__ = root def A__ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' lowercase__ = self.trie for current_token in current_seq: lowercase__ = start[current_token] lowercase__ = list(start.keys() ) return next_tokens def A__ ( self , lowerCamelCase__ ) -> Any: '''simple docstring''' lowercase__ = self.next_tokens(__lowerCAmelCase ) return len(__lowerCAmelCase ) == 0 def A__ ( self , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' lowercase__ = list(root.values() ) if len(__lowerCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(__lowerCAmelCase ) for nn in next_nodes] ) def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: '''simple docstring''' lowercase__ = 
self.count_leaves(__lowerCAmelCase ) return len(__lowerCAmelCase ) != leaf_count class A ( lowerCAmelCase_ ): def __init__( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) lowercase__ = DisjunctiveTrie(__lowerCAmelCase ) lowercase__ = nested_token_ids lowercase__ = self.trie.max_height lowercase__ = [] lowercase__ = False def A__ ( self ) -> Tuple: '''simple docstring''' lowercase__ = self.trie.next_tokens(self.current_seq ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def A__ ( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) lowercase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def A__ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) lowercase__ = False lowercase__ = False lowercase__ = False if self.does_advance(__lowerCAmelCase ): self.current_seq.append(__lowerCAmelCase ) lowercase__ = True else: lowercase__ = True self.reset() lowercase__ = self.trie.reached_leaf(self.current_seq ) lowercase__ = completed return stepped, completed, reset def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__ = False lowercase__ = [] def A__ ( self ) -> Dict: '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def A__ ( self , lowerCamelCase__=False ) -> Optional[Any]: '''simple docstring''' lowercase__ = DisjunctiveConstraint(self.token_ids ) if stateful: lowercase__ = self.seqlen lowercase__ = self.current_seq lowercase__ = self.completed return new_constraint class A : def __init__( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' lowercase__ = constraints # max # of steps required to fulfill a given constraint lowercase__ = max([c.seqlen for c in constraints] ) lowercase__ = len(__lowerCAmelCase ) lowercase__ = False self.init_state() def A__ ( self ) -> int: '''simple docstring''' lowercase__ = [] lowercase__ = None lowercase__ = [constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.constraints] def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def A__ ( self ) -> Any: '''simple docstring''' lowercase__ = [] if self.inprogress_constraint is None: for constraint in 
self.pending_constraints: # "pending" == "unfulfilled yet" lowercase__ = constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) else: lowercase__ = self.inprogress_constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def A__ ( self , lowerCamelCase__ ) -> Dict: '''simple docstring''' self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint lowercase__ , lowercase__ = self.add(__lowerCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def A__ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) lowercase__ , lowercase__ = False, False if self.completed: lowercase__ = True lowercase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state lowercase__ , lowercase__ , lowercase__ = self.inprogress_constraint.update(__lowerCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) ) lowercase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) lowercase__ = None if len(self.pending_constraints ) == 0: # we're done! lowercase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__lowerCAmelCase ): lowercase__ , lowercase__ , lowercase__ = pending_constraint.update(__lowerCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(__lowerCAmelCase ) lowercase__ = None if not complete and stepped: lowercase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". lowercase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. lowercase__ = True break # prevent accidentally stepping through multiple constraints with just one token. 
return complete, stepped def A__ ( self , lowerCamelCase__=True ) -> List[Any]: '''simple docstring''' lowercase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: lowercase__ = [ constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: lowercase__ = self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) lowercase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
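These classes back constrained beam search in `transformers`; the phrase constraint defined above is `PhrasalConstraint` upstream. A hedged end-to-end sketch, assuming `transformers` and `torch` are installed; the exact output depends on the checkpoint:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tok = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Force the phrase "Maschinelles Lernen" to appear in the generated sequence.
force = PhrasalConstraint(tok("Maschinelles Lernen", add_special_tokens=False).input_ids)
inputs = tok("translate English to German: I like machine learning.", return_tensors="pt")

# Constrained generation requires beam search (num_beams > 1).
out = model.generate(**inputs, constraints=[force], num_beams=5)
print(tok.decode(out[0], skip_special_tokens=True))
```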
164
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
0
"""simple docstring""" def a_ ( ): for n in range(1 , 1_0_0_0_0_0_0 ): yield n * (n + 1) // 2 def a_ ( lowerCamelCase ): UpperCAmelCase__ = 1 UpperCAmelCase__ = 2 while i * i <= n: UpperCAmelCase__ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def a_ ( ): return next(i for i in triangle_number_generator() if count_divisors(lowerCamelCase ) > 5_0_0 ) if __name__ == "__main__": print(solution())
98
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
0
'''simple docstring''' def a ( lowerCamelCase__ , lowerCamelCase__ = False ): '''simple docstring''' if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Dict = f'Expected string as input, found {type(lowerCamelCase__ )}' raise ValueError(lowerCamelCase__ ) if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Optional[Any] = f'Expected boolean as use_pascal parameter, found {type(lowerCamelCase__ )}' raise ValueError(lowerCamelCase__ ) A_ : Union[str, Any] = input_str.split("""_""" ) A_ : Dict = 0 if use_pascal else 1 A_ : Dict = words[start_index:] A_ : Dict = [word[0].upper() + word[1:] for word in words_to_capitalize] A_ : Dict = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
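A de-mangled equivalent with worked examples; the readable names here are mine:

```python
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1          # PascalCase capitalizes the first word too
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"
```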
206
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
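The metric above is a thin wrapper over scikit-learn, so the docstring's first example and its Recall = TP / (TP + FN) formula can be sanity-checked directly:

```python
from sklearn.metrics import recall_score

refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
# Of the three positives in refs, two are predicted correctly: TP = 2, FN = 1.
print(recall_score(refs, preds))  # 2 / 3 = 0.6666...
```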
289
0
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase ( lowerCAmelCase_ ): '''simple docstring''' _A : str = ['image_processor', 'tokenizer'] _A : Union[str, Any] = 'LayoutLMv2ImageProcessor' _A : Optional[int] = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self: Tuple , snake_case: Tuple=None , snake_case: Optional[int]=None , **snake_case: List[Any] ) -> Union[str, Any]: if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __lowerCAmelCase , ) snake_case_ :Optional[Any] = kwargs.pop("""feature_extractor""" ) snake_case_ :Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self: Tuple , snake_case: str , snake_case: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , snake_case: Union[List[List[int]], List[List[List[int]]]] = None , snake_case: Optional[Union[List[int], List[List[int]]]] = None , snake_case: bool = True , snake_case: Union[bool, str, PaddingStrategy] = False , snake_case: Union[bool, str, TruncationStrategy] = None , snake_case: Optional[int] = None , snake_case: int = 0 , snake_case: Optional[int] = None , snake_case: Optional[bool] = None , snake_case: Optional[bool] = None , snake_case: bool = False , snake_case: bool = False , snake_case: bool = False , snake_case: bool = False , snake_case: bool = True , snake_case: Optional[Union[str, TensorType]] = None , **snake_case: Union[str, Any] , ) -> Optional[Any]: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes """ """if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" ) # first, apply the image processor snake_case_ :Union[str, Any] = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): snake_case_ :Dict = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case_ :int = features["""words"""] snake_case_ :int = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , 
pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) # add pixel values snake_case_ :Optional[Any] = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: snake_case_ :str = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs["""overflow_to_sample_mapping"""] ) snake_case_ :int = images return encoded_inputs def lowerCAmelCase_ ( self: List[str] , snake_case: List[str] , snake_case: Dict ) -> Optional[Any]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case_ :str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f""" {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}""" ) return images_with_overflow def lowerCAmelCase_ ( self: Union[str, Any] , *snake_case: List[Any] , **snake_case: int ) -> List[str]: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self: int , *snake_case: List[str] , **snake_case: Optional[int] ) -> Tuple: return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def lowerCAmelCase_ ( self: Tuple ) -> Any: return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCAmelCase_ ( self: Any ) -> Optional[Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCAmelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCAmelCase , ) return self.image_processor
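A hedged usage sketch for the processor above; it assumes `transformers` with vision extras plus Tesseract, since `apply_ocr` defaults to True in the image processor, and the image path is a placeholder:

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")

encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
print(list(encoding.keys()))  # includes 'input_ids', 'bbox', 'attention_mask', 'image'
```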
66
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
289
0
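The processor at the top of this section pairs each overflowing token window with its source image through `overflow_to_sample_mapping`. A minimal standalone sketch of that indexing step (the function name and toy data below are illustrative, not part of the library):

def map_overflow_to_images(images: list, overflow_to_sample_mapping: list[int]) -> list:
    # One output entry per tokenized window, each pointing back at its source image.
    return [images[sample_idx] for sample_idx in overflow_to_sample_mapping]


# Two documents; the first is long enough to overflow into two token windows.
assert map_overflow_to_images(["img_a", "img_b"], [0, 0, 1]) == ["img_a", "img_a", "img_b"]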
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __snake_case = 16 __snake_case = 32 def a ( __a , __a = 16 ) -> int: '''simple docstring''' UpperCamelCase__ :Any = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCamelCase__ :int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase__ :List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase__ :Optional[int] = datasets.map( __a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase__ :Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase__ :List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase__ :Optional[int] = 16 elif accelerator.mixed_precision != "no": UpperCamelCase__ :str = 8 else: UpperCamelCase__ :List[str] = None return tokenizer.pad( __a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCamelCase__ :Any = DataLoader( tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) UpperCamelCase__ :List[Any] = DataLoader( tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __snake_case = mocked_dataloaders # noqa: F811 def a ( __a , __a ) -> Any: '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __a ) == "1": UpperCamelCase__ :Union[str, Any] = 2 # Initialize accelerator UpperCamelCase__ :List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase__ :List[Any] = config['''lr'''] UpperCamelCase__ :Optional[Any] = int(config['''num_epochs'''] ) UpperCamelCase__ :List[str] = int(config['''seed'''] ) UpperCamelCase__ :Any = int(config['''batch_size'''] ) UpperCamelCase__ :Dict = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__a ) def inner_training_loop(__a ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__a ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase__ :str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase__ :Optional[int] = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase__ :str = AdamW(params=model.parameters() , lr=__a ) UpperCamelCase__ , UpperCamelCase__ :List[str] = get_dataloaders(__a , __a ) # Instantiate scheduler UpperCamelCase__ :Optional[int] = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=100 , num_training_steps=(len(__a ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = accelerator.prepare( __a , __a , __a , __a , __a ) # Now we train the model for epoch in range(__a ): model.train() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCamelCase__ :int = model(**__a ) UpperCamelCase__ :List[str] = outputs.loss accelerator.backward(__a ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase__ :Union[str, Any] = model(**__a ) UpperCamelCase__ :str = outputs.logits.argmax(dim=-1 ) UpperCamelCase__ , UpperCamelCase__ :int = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__a , references=__a , ) UpperCamelCase__ :Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , __a ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a ( ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ :List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) UpperCamelCase__ :Dict = parser.parse_args() UpperCamelCase__ :Tuple = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__a , __a ) if __name__ == "__main__": main()
97
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
0
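A brute-force cross-check for the segmented sieve above; it assumes the `sieve` function from the sample is in scope, and the bound of 100 is arbitrary:

def is_prime(k: int) -> bool:
    return k >= 2 and all(k % d for d in range(2, int(k**0.5) + 1))


assert sieve(100) == [p for p in range(2, 101) if is_prime(p)]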
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters; keep shorter words as-is."""
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
322
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
0
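A couple of spot checks for the word-reversal helper earlier in this row (assuming it is in scope; the inputs are arbitrary):

assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
assert reverse_long_words("one four seven") == "one four neves"  # only words longer than 4 letters flip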
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging __lowerCAmelCase = logging.get_logger(__name__) logging.set_verbosity_info() def snake_case_ ( snake_case , snake_case ) -> Any: if "xprophetnet" in prophetnet_checkpoint_path: lowercase__: Optional[int] = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case ) lowercase__ , lowercase__: List[str] = XLMProphetNetForConditionalGeneration.from_pretrained( snake_case , output_loading_info=snake_case ) else: lowercase__: int = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case ) lowercase__ , lowercase__: Dict = ProphetNetForConditionalGeneration.from_pretrained( snake_case , output_loading_info=snake_case ) lowercase__: Optional[int] = ['key_proj', 'value_proj', 'query_proj'] lowercase__: Any = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: lowercase__: Union[str, Any] = key.split('.' ) if attributes[0] == "lm_head": lowercase__: Dict = prophet lowercase__: int = prophet_old else: lowercase__: List[str] = prophet.prophetnet lowercase__: Tuple = prophet_old.model lowercase__: Union[str, Any] = False for attribute in attributes: if attribute in mapping: lowercase__: str = mapping[attribute] if not hasattr(snake_case , snake_case ) and len(snake_case ) > 0: lowercase__: int = attribute elif hasattr(snake_case , snake_case ): lowercase__: int = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowercase__: List[Any] = old_model.weight logger.info(f'{attribute} is initialized.' ) lowercase__: Any = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
lowercase__: int = old_model.bias logger.info(f'{attribute} is initialized' ) lowercase__: Optional[Any] = True break elif attribute in special_keys and hasattr(snake_case , 'in_proj_weight' ): lowercase__: Tuple = old_model.in_proj_weight.shape[0] // 3 lowercase__: int = getattr(snake_case , snake_case ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowercase__: Dict = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowercase__: Optional[int] = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowercase__: int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowercase__: List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowercase__: Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowercase__: Optional[Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowercase__: Union[str, Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." lowercase__: Dict = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) lowercase__: Dict = True break if attribute.isdigit(): lowercase__: Any = model[int(snake_case )] lowercase__: Any = old_model[int(snake_case )] else: lowercase__: Dict = getattr(snake_case , snake_case ) if old_attribute == "": lowercase__: List[Any] = old_model else: if not hasattr(snake_case , snake_case ): raise ValueError(f'{old_model} does not have {old_attribute}' ) lowercase__: Optional[Any] = getattr(snake_case , snake_case ) if not is_key_init: raise ValueError(f'{key} was not correctly initialized!' ) print(f'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(snake_case ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __lowerCAmelCase = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
196
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
289
0
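The ProphetNet conversion earlier in this row splits a fused attention projection of shape (3 * embed_dim, embed_dim) into separate query/key/value parameters. A minimal sketch of that slicing on toy tensors (the sizes below are arbitrary):

import torch
from torch import nn

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * embed_dim)

q_weight = nn.Parameter(in_proj_weight[:embed_dim, :])
q_bias = nn.Parameter(in_proj_bias[:embed_dim])
k_weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v_weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])

assert q_weight.shape == (embed_dim, embed_dim)
assert k_weight.shape == v_weight.shape == (embed_dim, embed_dim)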
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _snake_case ( lowerCAmelCase_ ): lowerCAmelCase_ : UNetaDModel lowerCAmelCase_ : KarrasVeScheduler def __init__( self , a__ , a__ ) -> Dict: '''simple docstring''' super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self , a__ = 1 , a__ = 50 , a__ = None , a__ = "pil" , a__ = True , **a__ , ) -> Optional[Any]: '''simple docstring''' snake_case_ = self.unet.config.sample_size snake_case_ = (batch_size, 3, img_size, img_size) snake_case_ = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) snake_case_ = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper snake_case_ = self.scheduler.schedule[t] snake_case_ = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat snake_case_ , snake_case_ = self.scheduler.add_noise_to_input(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. snake_case_ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev snake_case_ = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. snake_case_ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample snake_case_ = self.scheduler.step_correct( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , step_output.prev_sample , step_output["derivative"] , ) snake_case_ = step_output.prev_sample snake_case_ = (sample / 2 + 0.5).clamp(0 , 1 ) snake_case_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case_ = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase )
85
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
0
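Small known values for the power-sum counter above (assuming `solve` from the sample is in scope; the cases were checked by hand):

assert solve(13, 2) == 1  # 13 = 2**2 + 3**2
assert solve(10, 2) == 1  # 10 = 1**2 + 3**2
assert solve(10, 3) == 0  # no sum of distinct cubes reaches 10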
"""simple docstring""" def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) + 1 lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. lowerCAmelCase = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )] # since string of zero length match pattern of zero length lowerCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , SCREAMING_SNAKE_CASE ): lowerCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , SCREAMING_SNAKE_CASE ): lowerCAmelCase = dp[0][j - 2] if pattern[j - 1] == """*""" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , SCREAMING_SNAKE_CASE ): for j in range(1 , SCREAMING_SNAKE_CASE ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": lowerCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: lowerCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): lowerCAmelCase = dp[i - 1][j] else: lowerCAmelCase = 0 else: lowerCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") SCREAMING_SNAKE_CASE__ = "aab" SCREAMING_SNAKE_CASE__ = "c*a*b" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f'{input_string} matches the given pattern {pattern}') else: print(f'{input_string} does not match with the given pattern {pattern}')
46
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
0
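A quick cross-check of the dynamic-programming matcher above against Python's own regex engine on the "." / "*" subset it implements (assumes `match_pattern` is in scope; the cases are arbitrary):

import re

for s, p in [("aab", "c*a*b"), ("aaa", "ab*a*c*a"), ("ab", ".*"), ("mississippi", "mis*is*p*.")]:
    assert match_pattern(s, p) == (re.fullmatch(p, s) is not None), (s, p)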
class SubArray:
    def __init__(self, arr: str) -> None:
        # we need a list, not a string, so convert the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # sum_value[i]: best sum of a subarray that ends at index i (Kadane's recurrence)
        # rear[i]: best sum seen anywhere in array[: i + 1]
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
230
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCamelCase__ : List[str] = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : __a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) __a : Optional[str] = field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) __a : Optional[str] = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""}) __a : Optional[str] = field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) __a : bool = field(default=lowerCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __a : Optional[str] = field( default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class _UpperCAmelCase : __a : str = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""}) __a : Optional[str] = field( default=lowerCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) __a : int = field( default=1_2_8 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""}) def UpperCamelCase ( ) -> str: _UpperCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) _UpperCAmelCase : List[Any] = import_module("""tasks""" ) try: _UpperCAmelCase : str = getattr(_lowerCAmelCase, model_args.task_type ) _UpperCAmelCase : List[Any] = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""", _lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _UpperCAmelCase : Tuple = token_classification_task.get_labels(data_args.labels ) _UpperCAmelCase : Union[str, Any] = dict(enumerate(_lowerCAmelCase ) ) _UpperCAmelCase : int = len(_lowerCAmelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCAmelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_lowerCAmelCase, idalabel=_lowerCAmelCase, labelaid={label: i for i, label in enumerate(_lowerCAmelCase )}, cache_dir=model_args.cache_dir, ) _UpperCAmelCase : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, ) _UpperCAmelCase : List[Any] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=_lowerCAmelCase, cache_dir=model_args.cache_dir, ) # Get datasets _UpperCAmelCase : Any = ( TokenClassificationDataset( token_classification_task=_lowerCAmelCase, data_dir=data_args.data_dir, tokenizer=_lowerCAmelCase, labels=_lowerCAmelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) _UpperCAmelCase : List[str] = ( TokenClassificationDataset( token_classification_task=_lowerCAmelCase, data_dir=data_args.data_dir, tokenizer=_lowerCAmelCase, labels=_lowerCAmelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def align_predictions(_lowerCAmelCase : Tuple, _lowerCAmelCase : Union[str, Any] ) -> Tuple[List[int], List[int]]: _UpperCAmelCase : Optional[Any] = np.argmax(_lowerCAmelCase, axis=2 ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = preds.shape _UpperCAmelCase : Union[str, Any] = [[] for _ in range(_lowerCAmelCase )] _UpperCAmelCase : Optional[Any] = [[] for _ in 
range(_lowerCAmelCase )] for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_lowerCAmelCase : Tuple ) -> Dict: _UpperCAmelCase , _UpperCAmelCase : List[str] = align_predictions(p.predictions, p.label_ids ) return { "accuracy_score": accuracy_score(_lowerCAmelCase, _lowerCAmelCase ), "precision": precision_score(_lowerCAmelCase, _lowerCAmelCase ), "recall": recall_score(_lowerCAmelCase, _lowerCAmelCase ), "f1": fa_score(_lowerCAmelCase, _lowerCAmelCase ), } # Data collator _UpperCAmelCase : int = DataCollatorWithPadding(_lowerCAmelCase, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCAmelCase : List[Any] = Trainer( model=_lowerCAmelCase, args=_lowerCAmelCase, train_dataset=_lowerCAmelCase, eval_dataset=_lowerCAmelCase, compute_metrics=_lowerCAmelCase, data_collator=_lowerCAmelCase, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase : Optional[int] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _UpperCAmelCase : Tuple = trainer.evaluate() _UpperCAmelCase : Tuple = os.path.join(training_args.output_dir, """eval_results.txt""" ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase, """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""", _lowerCAmelCase, _lowerCAmelCase ) writer.write("""%s = %s\n""" % (key, value) ) results.update(_lowerCAmelCase ) # Predict if training_args.do_predict: _UpperCAmelCase : Optional[Any] = TokenClassificationDataset( token_classification_task=_lowerCAmelCase, data_dir=data_args.data_dir, tokenizer=_lowerCAmelCase, labels=_lowerCAmelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = trainer.predict(_lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase : str = align_predictions(_lowerCAmelCase, _lowerCAmelCase ) _UpperCAmelCase : int = os.path.join(training_args.output_dir, """test_results.txt""" ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase, """w""" ) as writer: for key, value in metrics.items(): logger.info(""" %s = %s""", _lowerCAmelCase, _lowerCAmelCase ) writer.write("""%s = %s\n""" % (key, value) ) # Save predictions _UpperCAmelCase : Optional[int] = os.path.join(training_args.output_dir, """test_predictions.txt""" ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase, """w""" ) as writer: with open(os.path.join(data_args.data_dir, """test.txt""" ), """r""" ) as f: token_classification_task.write_predictions_to_file(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) return results def UpperCamelCase ( _lowerCAmelCase : Tuple ) -> Tuple: main() if __name__ == "__main__": main()
246
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
289
0
'''simple docstring'''


def _A ( lowercase__ ):
    if isinstance(lowercase__ , lowercase__ ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(lowercase__ , lowercase__ ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )

    if num == 0:
        return "0b0"

    lowercase__ = False
    if num < 0:
        lowercase__ = True
        lowercase__ = -num

    lowercase__ = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1

    if negative:
        return "-0b" + "".join(str(lowercase__ ) for e in binary )
    return "0b" + "".join(str(lowercase__ ) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
164
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
289
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowerCAmelCase__ : List[str] = logging.get_logger(__name__) @dataclass class snake_case ( lowerCAmelCase_ ): """simple docstring""" snake_case__ = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : List[str] ,**lowerCamelCase__ : str ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: UpperCAmelCase__ = deprecated_arg[3:] UpperCAmelCase__ = not kwargs.pop(__lowerCAmelCase ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) UpperCAmelCase__ = kwargs.pop('tpu_name' ,self.tpu_name ) UpperCAmelCase__ = kwargs.pop('device_idx' ,self.device_idx ) UpperCAmelCase__ = kwargs.pop('eager_mode' ,self.eager_mode ) UpperCAmelCase__ = kwargs.pop('use_xla' ,self.use_xla ) super().__init__(**__lowerCAmelCase ) snake_case__ = field( default=lowerCAmelCase_ , metadata={"help": "Name of TPU"} , ) snake_case__ = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) snake_case__ = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark models in eager model."} ) snake_case__ = field( default=lowerCAmelCase_ , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def __lowerCAmelCase ( self : Any ): requires_backends(self ,['tf'] ) UpperCAmelCase__ = None if self.tpu: try: if self.tpu_name: UpperCAmelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: UpperCAmelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: UpperCAmelCase__ = None return tpu @cached_property def __lowerCAmelCase ( self : str ): requires_backends(self ,['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) UpperCAmelCase__ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' ) UpperCAmelCase__ = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] ,'GPU' ) # disable GPU UpperCAmelCase__ = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def __lowerCAmelCase ( self : Union[str, Any] ): requires_backends(self ,['tf'] ) return self._setup_tpu is not None @property def __lowerCAmelCase ( self : List[Any] ): requires_backends(self ,['tf'] ) return self._setup_strategy @property def __lowerCAmelCase ( self : Optional[Any] ): requires_backends(self ,['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def __lowerCAmelCase ( self : Any ): requires_backends(self ,['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __lowerCAmelCase ( self : Any ): return self.n_gpu > 0
98
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
289
0
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


lowerCamelCase :Optional[Any] = pytest.mark.integration


@require_faiss
class _lowerCAmelCase ( lowerCAmelCase_ ):
    def _a (self ):
        A_ : List[Any] = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(__lowerCAmelCase ) for x in np.arange(30 ).tolist()]} )
        return dset

    def _a (self ):
        import faiss

        A_ : List[Any] = self._create_dummy_dataset()
        A_ : int = dset.map(
            lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase )
        A_ : Union[str, Any] = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        A_, A_ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
        dset.drop_index("""vecs""" )

    def _a (self ):
        import faiss

        A_ : Optional[int] = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) ,
            index_name="""vecs""" ,
            batch_size=100 ,
            metric_type=faiss.METRIC_INNER_PRODUCT ,
        )
        A_, A_ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )

    def _a (self ):
        import faiss

        A_ : Optional[Any] = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) ,
            index_name="""vecs""" ,
            metric_type=faiss.METRIC_INNER_PRODUCT ,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file:
            dset.save_faiss_index("""vecs""" , tmp_file.name )
            dset.load_faiss_index("""vecs2""" , tmp_file.name )
        os.unlink(tmp_file.name )

        A_, A_ = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )

    def _a (self ):
        A_ : Tuple = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
        dset.drop_index("""vecs""" )
        self.assertRaises(__lowerCAmelCase , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )

    def _a (self ):
        from elasticsearch import Elasticsearch

        A_ : Dict = self._create_dummy_dataset()
        with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            A_ : Optional[Any] = {"""acknowledged""": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            A_ : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
            A_ : Union[str, Any] = Elasticsearch()

            dset.add_elasticsearch_index("""filename""" , es_client=__lowerCAmelCase )
            A_, A_ = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
            self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )


@require_faiss
class _lowerCAmelCase ( lowerCAmelCase_ ):
    def _a (self ):
        import faiss

        A_ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )

        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )

        # single query
        A_ : Union[str, Any] = np.zeros(5 , dtype=np.floataa )
        A_ : int = 1
        A_, A_ = index.search(__lowerCAmelCase )
        self.assertRaises(__lowerCAmelCase , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )

        # batched queries
        A_ : Union[str, Any] = np.eye(5 , dtype=np.floataa )[::-1]
        A_, A_ = index.search_batch(__lowerCAmelCase )
        self.assertRaises(__lowerCAmelCase , index.search_batch , queries[0] )
        A_ : str = [scores[0] for scores in total_scores]
        A_ : Tuple = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , __lowerCAmelCase )

    def _a (self ):
        import faiss

        A_ : Tuple = FaissIndex(string_factory="""Flat""" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        A_ : Dict = FaissIndex(string_factory="""LSH""" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(__lowerCAmelCase ):
            A_ : str = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )

    def _a (self ):
        import faiss

        A_ : List[Any] = faiss.IndexFlat(5 )
        A_ : Union[str, Any] = FaissIndex(custom_index=__lowerCAmelCase )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )

    def _a (self ):
        import faiss

        A_ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file:
            index.save(tmp_file.name )
            A_ : Optional[int] = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )

        A_ : Dict = np.zeros(5 , dtype=np.floataa )
        A_ : Optional[Any] = 1
        A_, A_ = index.search(__lowerCAmelCase )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )


@require_faiss
def a ( lowerCamelCase__ ):
    '''simple docstring'''
    import faiss

    A_ : List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )

    A_ : int = """index.faiss"""
    A_ : Optional[int] = f'mock://{index_name}'
    index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
    A_ : Tuple = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )

    A_ : List[Any] = np.zeros(5 , dtype=np.floataa )
    A_ : List[str] = 1
    A_, A_ = index.search(lowerCamelCase__ )
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class _lowerCAmelCase ( lowerCAmelCase_ ):
    def _a (self ):
        from elasticsearch import Elasticsearch

        with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            A_ : List[str] = Elasticsearch()
            A_ : int = {"""acknowledged""": True}
            A_ : List[str] = ElasticSearchIndex(es_client=__lowerCAmelCase )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["""foo""", """bar""", """foobar"""] )

            # single query
            A_ : Any = """foo"""
            A_ : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            A_, A_ = index.search(__lowerCAmelCase )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # single query with timeout
            A_ : str = """foo"""
            A_ : Any = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            A_, A_ = index.search(__lowerCAmelCase , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # batched queries
            A_ : Any = ["""foo""", """bar""", """foobar"""]
            A_ : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            A_, A_ = index.search_batch(__lowerCAmelCase )
            A_ : Dict = [scores[0] for scores in total_scores]
            A_ : str = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
            self.assertListEqual([1, 1, 1] , __lowerCAmelCase )

            # batched queries with timeout
            A_ : Union[str, Any] = ["""foo""", """bar""", """foobar"""]
            A_ : Union[str, Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            A_, A_ = index.search_batch(__lowerCAmelCase , request_timeout=30 )
            A_ : Optional[int] = [scores[0] for scores in total_scores]
            A_ : Optional[int] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
            self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
206
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
0
"""simple docstring""" import requests __a = "" # <-- Put your OpenWeatherMap appid here! __a = "https://api.openweathermap.org/data/2.5/" def A_ ( _lowercase = "Chicago", _lowercase = APPID ): '''simple docstring''' return requests.get(URL_BASE + """weather""", params=locals() ).json() def A_ ( _lowercase = "Kolkata, India", _lowercase = APPID ): '''simple docstring''' return requests.get(URL_BASE + """forecast""", params=locals() ).json() def A_ ( _lowercase = 55.68, _lowercase = 12.57, _lowercase = APPID ): '''simple docstring''' return requests.get(URL_BASE + """onecall""", params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: __a = input("Enter a location:").strip() if location: pprint(current_weather(location)) else: break
66
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
0
'''simple docstring'''
from __future__ import annotations


def a ( __a , __a , __a ) -> Any:
    '''simple docstring'''
    UpperCamelCase__ :Tuple = list(range(len(__a ) ) )
    UpperCamelCase__ :Any = [v / w for v, w in zip(__a , __a )]
    index.sort(key=lambda __a : ratio[i] , reverse=__a )

    UpperCamelCase__ :str = 0
    UpperCamelCase__ :List[Any] = [0] * len(__a )

    for i in index:
        if weight[i] <= capacity:
            UpperCamelCase__ :int = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            UpperCamelCase__ :str = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
97
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
0
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er',
            '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embedded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f' {text}'

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
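# Illustration only (not part of the original test file): what `trim_offsets` changes in
# practice. With trim_offsets=True the offset of a mid-sentence token excludes the leading
# space; with trim_offsets=False the space is included. Assumes the fast tokenizer files for
# `allenai/longformer-base-4096` are available.
def _demo_trim_offsets():
    from transformers import LongformerTokenizerFast

    text = "hello hello"
    trimmed = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096", trim_offsets=True)
    untrimmed = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096", trim_offsets=False)
    # second token starts at index 6 when offsets are trimmed, at index 5 (the space) when not
    print(trimmed(text, return_offsets_mapping=True, add_special_tokens=False).offset_mapping)
    print(untrimmed(text, return_offsets_mapping=True, add_special_tokens=False).offset_mapping)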
322
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
0
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>",
            "我", "是", "C", "P", "M", "A", "n", "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
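# Illustration only (not part of the original test file): the toy vocab written in setUp()
# can also be loaded directly with the slow tokenizer class, which is what the
# TokenizerTesterMixin machinery does behind the scenes. Assumes `jieba` is installed,
# since CpmAntTokenizer segments Chinese text with it before the vocabulary lookup.
def _demo_cpmant_toy_tokenizer(vocab_file):
    from transformers.models.cpmant.tokenization_cpmant import CpmAntTokenizer

    tokenizer = CpmAntTokenizer(vocab_file)
    # characters such as "我" and "是" are present in the toy vocab above
    return tokenizer.tokenize("我是CPMAnt")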
196
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
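# Illustration only (not part of the original script): the decorator pattern in isolation.
# `find_executable_batch_size` retries the wrapped function with a halved batch size whenever
# it raises a recognized out-of-memory error, so everything that depends on the batch size
# must be (re)built inside the function. The simulated error message below is an assumption
# chosen to contain the "CUDA out of memory" substring that the retry logic looks for.
def _demo_find_executable_batch_size():
    from accelerate.utils import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        if batch_size > 32:  # pretend batch sizes above 32 do not fit in memory
            raise RuntimeError("CUDA out of memory (simulated)")
        return batch_size

    return train()  # settles on 32 after two halvings: 128 -> 64 -> 32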
289
0
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase__ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase__ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase__ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase_ ( self) -> MetricInfo: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = 1 , lowercase = 4 , ) -> Dict[str, float]: '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowercase , hypotheses=lowercase , min_len=lowercase , max_len=lowercase) }
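# Illustration only (not part of the original metric file): `_compute` is a thin wrapper, so
# the same score can be obtained from NLTK directly.
def _demo_corpus_gleu():
    from nltk.translate import gleu_score

    hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
    list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
    # identical n-gram counting to the metric above, with the default 1- to 4-gram range
    return gleu_score.corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)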
290
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __snake_case ( __lowerCAmelCase ): a__ = 42 a__ = jnp.floataa a__ = True def lowerCamelCase_ ( self) -> int: '''simple docstring''' super().setup() a__: int = nn.Dense(5 , dtype=self.dtype) def __call__( self , *lowercase , **lowercase) -> Dict: '''simple docstring''' a__: Dict = super().__call__(*lowercase , **lowercase) a__: str = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class __snake_case ( __lowerCAmelCase ): a__ = FlaxBigBirdForNaturalQuestionsModule def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: def cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): a__: Any = logits.shape[-1] a__: List[Any] = (labels[..., None] == jnp.arange(_SCREAMING_SNAKE_CASE )[None]).astype('f4' ) a__: List[str] = jax.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 ) a__: Dict = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: a__: str = reduction(_SCREAMING_SNAKE_CASE ) return loss a__: Tuple = partial(_SCREAMING_SNAKE_CASE , reduction=jnp.mean ) a__: List[str] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: Union[str, Any] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: Any = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __snake_case : a__ = "google/bigbird-roberta-base" a__ = 3000 a__ = 1_0500 a__ = 128 a__ = 3 a__ = 1 a__ = 5 # tx_args a__ = 3e-5 a__ = 0.0 a__ = 2_0000 a__ = 0.0095 a__ = "bigbird-roberta-natural-questions" a__ = "training-expt" a__ = "data/nq-training.jsonl" a__ = "data/nq-validation.jsonl" def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' os.makedirs(self.base_dir , exist_ok=lowercase) a__: str = os.path.join(self.base_dir , self.save_dir) a__: List[str] = self.batch_size_per_device * jax.device_count() @dataclass class __snake_case : a__ = 42 a__ = 4096 # no dynamic padding on TPUs def __call__( self , lowercase) -> List[Any]: '''simple docstring''' a__: int = self.collate_fn(lowercase) a__: Optional[int] = jax.tree_util.tree_map(lowercase , lowercase) return batch def lowerCamelCase_ ( self , lowercase) -> Dict: '''simple docstring''' a__ , a__: Dict = self.fetch_inputs(features['input_ids']) a__: List[Any] = { 'input_ids': jnp.array(lowercase , dtype=jnp.intaa), 'attention_mask': jnp.array(lowercase , dtype=jnp.intaa), 'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa), 'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa), 'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa), } return batch def lowerCamelCase_ ( self , lowercase) -> List[str]: '''simple docstring''' a__: List[Any] = [self._fetch_inputs(lowercase) for ids in input_ids] return zip(*lowercase) def lowerCamelCase_ ( self , lowercase) -> 
Dict: '''simple docstring''' a__: Union[str, Any] = [1 for _ in range(len(lowercase))] while len(lowercase) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]: if seed is not None: a__: int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ): a__: Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(_SCREAMING_SNAKE_CASE ) @partial(jax.pmap , axis_name='batch' ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Any: def loss_fn(_SCREAMING_SNAKE_CASE ): a__: str = model_inputs.pop('start_labels' ) a__: Dict = model_inputs.pop('end_labels' ) a__: Optional[int] = model_inputs.pop('pooled_labels' ) a__: Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE ) a__ , a__ , a__: Optional[int] = outputs return state.loss_fn( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) a__ , a__: Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE ) a__: List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE ) a__ , a__: str = grad_fn(state.params ) a__: Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' ) a__: int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , 'batch' ) a__: Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='batch' ) def __a ( _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Optional[Any]: a__: Optional[int] = model_inputs.pop('start_labels' ) a__: int = model_inputs.pop('end_labels' ) a__: Dict = model_inputs.pop('pooled_labels' ) a__: Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE ) a__ , a__ , a__: int = outputs a__: Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch' ) return metrics class __snake_case ( train_state.TrainState ): a__ = struct.field(pytree_node=__lowerCAmelCase ) @dataclass class __snake_case : a__ = 42 a__ = 42 a__ = 42 a__ = 42 a__ = 42 a__ = 42 a__ = None def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Optional[int]: '''simple docstring''' a__: Dict = model.params a__: Any = TrainState.create( apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , ) if ckpt_dir is not None: a__ , a__ , a__ , a__ , a__: Any = restore_checkpoint(lowercase , lowercase) a__: Any = { 'lr': args.lr, 'init_lr': args.init_lr, 'warmup_steps': args.warmup_steps, 'num_train_steps': num_train_steps, 'weight_decay': args.weight_decay, } a__ , a__: str = build_tx(**lowercase) a__: Optional[Any] = train_state.TrainState( step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , ) a__: int = args a__: Union[str, Any] = data_collator a__: Any = lr a__: Dict = params a__: Tuple = jax_utils.replicate(lowercase) return state def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int: '''simple docstring''' a__: int = self.args a__: str = len(lowercase) // args.batch_size a__: Tuple = 
jax.random.PRNGKey(0) a__: List[Any] = jax.random.split(lowercase , jax.device_count()) for epoch in range(args.max_epochs): a__: str = jnp.array(0 , dtype=jnp.floataa) a__: Tuple = get_batched_dataset(lowercase , args.batch_size , seed=lowercase) a__: Optional[int] = 0 for batch in tqdm(lowercase , total=lowercase , desc=f'Running EPOCH-{epoch}'): a__: List[str] = self.data_collator(lowercase) a__ , a__ , a__: int = self.train_step_fn(lowercase , lowercase , **lowercase) running_loss += jax_utils.unreplicate(metrics['loss']) i += 1 if i % args.logging_steps == 0: a__: List[Any] = jax_utils.unreplicate(state.step) a__: Tuple = running_loss.item() / i a__: Optional[Any] = self.scheduler_fn(state_step - 1) a__: List[Any] = self.evaluate(lowercase , lowercase) a__: List[str] = { 'step': state_step.item(), 'eval_loss': eval_loss.item(), 'tr_loss': tr_loss, 'lr': lr.item(), } tqdm.write(str(lowercase)) self.logger.log(lowercase , commit=lowercase) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase) def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__: Tuple = get_batched_dataset(lowercase , self.args.batch_size) a__: Dict = len(lowercase) // self.args.batch_size a__: Tuple = jnp.array(0 , dtype=jnp.floataa) a__: List[Any] = 0 for batch in tqdm(lowercase , total=lowercase , desc='Evaluating ... '): a__: str = self.data_collator(lowercase) a__: List[str] = self.val_step_fn(lowercase , **lowercase) running_loss += jax_utils.unreplicate(metrics['loss']) i += 1 return running_loss / i def lowerCamelCase_ ( self , lowercase , lowercase) -> Any: '''simple docstring''' a__: List[Any] = jax_utils.unreplicate(lowercase) print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ') self.model_save_fn(lowercase , params=state.params) with open(os.path.join(lowercase , 'opt_state.msgpack') , 'wb') as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(lowercase , 'args.joblib')) joblib.dump(self.data_collator , os.path.join(lowercase , 'data_collator.joblib')) with open(os.path.join(lowercase , 'training_state.json') , 'w') as f: json.dump({'step': state.step.item()} , lowercase) print('DONE') def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... 
' ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'flax_model.msgpack' ) , 'rb' ) as f: a__: int = from_bytes(state.params , f.read() ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'opt_state.msgpack' ) , 'rb' ) as f: a__: Optional[Any] = from_bytes(state.opt_state , f.read() ) a__: Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'args.joblib' ) ) a__: int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'data_collator.joblib' ) ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'training_state.json' ) , 'r' ) as f: a__: Any = json.load(_SCREAMING_SNAKE_CASE ) a__: Optional[Any] = training_state['step'] print('DONE' ) return params, opt_state, step, args, data_collator def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]: a__: str = num_train_steps - warmup_steps a__: str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE ) a__: List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1e-7 , transition_steps=_SCREAMING_SNAKE_CASE ) a__: int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple: def weight_decay_mask(_SCREAMING_SNAKE_CASE ): a__: List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE ) a__: List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()} return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE ) a__: List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE ) return tx, lr
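# Illustration only (not part of the original training script): the shape of the schedule
# that `scheduler_fn` returns, namely a linear ramp from `init_lr` to `lr` over
# `warmup_steps`, joined to a linear decay toward ~0 for the remaining steps.
def _demo_schedule():
    lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=10, num_train_steps=100)
    assert float(lr(0)) == 0.0  # start of warmup
    assert abs(float(lr(10)) - 3e-5) < 1e-10  # peak at the warmup boundary
    assert float(lr(99)) < 3e-5  # linearly decaying afterwards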
290
1